diff --git a/sys/compat/linux/linux_socket.c b/sys/compat/linux/linux_socket.c index 2893e93bbcd7..f474ea06440a 100644 --- a/sys/compat/linux/linux_socket.c +++ b/sys/compat/linux/linux_socket.c @@ -1,2741 +1,2742 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1995 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #endif #ifdef COMPAT_LINUX32 #include #include #include #else #include #include #endif #include #include #include #include #include #include #include _Static_assert(offsetof(struct l_ifreq, ifr_ifru) == offsetof(struct ifreq, ifr_ifru), "Linux ifreq member names should be equal to FreeBSD"); _Static_assert(offsetof(struct l_ifreq, ifr_index) == offsetof(struct ifreq, ifr_index), "Linux ifreq member names should be equal to FreeBSD"); _Static_assert(offsetof(struct l_ifreq, ifr_name) == offsetof(struct ifreq, ifr_name), "Linux ifreq member names should be equal to FreeBSD"); #define SECURITY_CONTEXT_STRING "unconfined" static int linux_sendmsg_common(struct thread *, l_int, struct l_msghdr *, l_uint); static int linux_recvmsg_common(struct thread *, l_int, struct l_msghdr *, l_uint, struct msghdr *); static int linux_set_socket_flags(int, int *); #define SOL_NETLINK 270 static int linux_to_bsd_sockopt_level(int level) { if (level == LINUX_SOL_SOCKET) return (SOL_SOCKET); /* Remaining values are RFC-defined protocol numbers. 
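* For example, a Linux setsockopt(fd, IPPROTO_TCP, ...) call arrives here with level 6, the same IANA protocol number FreeBSD uses, so it can be passed through unchanged; only SOL_SOCKET itself needs translation (1 on Linux for the platforms targeted here, 0xffff on FreeBSD).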
*/ return (level); } static int bsd_to_linux_sockopt_level(int level) { if (level == SOL_SOCKET) return (LINUX_SOL_SOCKET); return (level); } static int linux_to_bsd_ip_sockopt(int opt) { switch (opt) { /* known and translated sockopts */ case LINUX_IP_TOS: return (IP_TOS); case LINUX_IP_TTL: return (IP_TTL); case LINUX_IP_HDRINCL: return (IP_HDRINCL); case LINUX_IP_OPTIONS: return (IP_OPTIONS); case LINUX_IP_RECVOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_RECVOPTS"); return (IP_RECVOPTS); case LINUX_IP_RETOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_REETOPTS"); return (IP_RETOPTS); case LINUX_IP_RECVTTL: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_RECVTTL"); return (IP_RECVTTL); case LINUX_IP_RECVTOS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_RECVTOS"); return (IP_RECVTOS); case LINUX_IP_FREEBIND: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_FREEBIND"); return (IP_BINDANY); case LINUX_IP_IPSEC_POLICY: /* we have this option, but not documented in ip(4) manpage */ LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_IPSEC_POLICY"); return (IP_IPSEC_POLICY); case LINUX_IP_MINTTL: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_MINTTL"); return (IP_MINTTL); case LINUX_IP_MULTICAST_IF: return (IP_MULTICAST_IF); case LINUX_IP_MULTICAST_TTL: return (IP_MULTICAST_TTL); case LINUX_IP_MULTICAST_LOOP: return (IP_MULTICAST_LOOP); case LINUX_IP_ADD_MEMBERSHIP: return (IP_ADD_MEMBERSHIP); case LINUX_IP_DROP_MEMBERSHIP: return (IP_DROP_MEMBERSHIP); case LINUX_IP_UNBLOCK_SOURCE: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_UNBLOCK_SOURCE"); return (IP_UNBLOCK_SOURCE); case LINUX_IP_BLOCK_SOURCE: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_BLOCK_SOURCE"); return (IP_BLOCK_SOURCE); case LINUX_IP_ADD_SOURCE_MEMBERSHIP: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_ADD_SOURCE_MEMBERSHIP"); return (IP_ADD_SOURCE_MEMBERSHIP); case LINUX_IP_DROP_SOURCE_MEMBERSHIP: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_DROP_SOURCE_MEMBERSHIP"); return (IP_DROP_SOURCE_MEMBERSHIP); case LINUX_MCAST_JOIN_GROUP: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_MCAST_JOIN_GROUP"); return (MCAST_JOIN_GROUP); case LINUX_MCAST_LEAVE_GROUP: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_MCAST_LEAVE_GROUP"); return (MCAST_LEAVE_GROUP); case LINUX_MCAST_JOIN_SOURCE_GROUP: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_MCAST_JOIN_SOURCE_GROUP"); return (MCAST_JOIN_SOURCE_GROUP); case LINUX_MCAST_LEAVE_SOURCE_GROUP: LINUX_RATELIMIT_MSG_NOTTESTED("IPv4 socket option IP_MCAST_LEAVE_SOURCE_GROUP"); return (MCAST_LEAVE_SOURCE_GROUP); case LINUX_IP_RECVORIGDSTADDR: return (IP_RECVORIGDSTADDR); /* known but not implemented sockopts */ case LINUX_IP_ROUTER_ALERT: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_ROUTER_ALERT (%d), you can not do user-space routing from linux programs", opt); return (-2); case LINUX_IP_PKTINFO: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_PKTINFO (%d), you can not get extended packet info for datagram sockets in linux programs", opt); return (-2); case LINUX_IP_PKTOPTIONS: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_PKTOPTIONS (%d)", opt); return (-2); case LINUX_IP_MTU_DISCOVER: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_MTU_DISCOVER (%d), your linux program can not control path-MTU discovery", opt); return (-2); case LINUX_IP_RECVERR: /* needed by steam */ LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_RECVERR (%d), you can 
not get extended reliability info in linux programs", opt); return (-2); case LINUX_IP_MTU: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_MTU (%d), your linux program can not control the MTU on this socket", opt); return (-2); case LINUX_IP_XFRM_POLICY: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_XFRM_POLICY (%d)", opt); return (-2); case LINUX_IP_PASSSEC: /* needed by steam */ LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_PASSSEC (%d), you can not get IPSEC related credential information associated with this socket in linux programs -- if you do not use IPSEC, you can ignore this", opt); return (-2); case LINUX_IP_TRANSPARENT: /* IP_BINDANY or more? */ LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_TRANSPARENT (%d), you can not enable transparent proxying in linux programs -- note, IP_FREEBIND is supported, no idea if the FreeBSD IP_BINDANY is equivalent to the Linux IP_TRANSPARENT or not, any info is welcome", opt); return (-2); case LINUX_IP_NODEFRAG: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_NODEFRAG (%d)", opt); return (-2); case LINUX_IP_CHECKSUM: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_CHECKSUM (%d)", opt); return (-2); case LINUX_IP_BIND_ADDRESS_NO_PORT: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_BIND_ADDRESS_NO_PORT (%d)", opt); return (-2); case LINUX_IP_RECVFRAGSIZE: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_RECVFRAGSIZE (%d)", opt); return (-2); case LINUX_MCAST_MSFILTER: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_MCAST_MSFILTER (%d)", opt); return (-2); case LINUX_IP_MULTICAST_ALL: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_MULTICAST_ALL (%d), your linux program will not see all multicast groups joined by the entire system, only those the program joined itself on this socket", opt); return (-2); case LINUX_IP_UNICAST_IF: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv4 socket option IP_UNICAST_IF (%d)", opt); return (-2); /* unknown sockopts */ default: return (-1); } } static int linux_to_bsd_ip6_sockopt(int opt) { switch (opt) { /* known and translated sockopts */ case LINUX_IPV6_2292PKTINFO: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_2292PKTINFO"); return (IPV6_2292PKTINFO); case LINUX_IPV6_2292HOPOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_2292HOPOPTS"); return (IPV6_2292HOPOPTS); case LINUX_IPV6_2292DSTOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_2292DSTOPTS"); return (IPV6_2292DSTOPTS); case LINUX_IPV6_2292RTHDR: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_2292RTHDR"); return (IPV6_2292RTHDR); case LINUX_IPV6_2292PKTOPTIONS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_2292PKTOPTIONS"); return (IPV6_2292PKTOPTIONS); case LINUX_IPV6_CHECKSUM: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_CHECKSUM"); return (IPV6_CHECKSUM); case LINUX_IPV6_2292HOPLIMIT: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_2292HOPLIMIT"); return (IPV6_2292HOPLIMIT); case LINUX_IPV6_NEXTHOP: return (IPV6_NEXTHOP); case LINUX_IPV6_UNICAST_HOPS: return (IPV6_UNICAST_HOPS); case LINUX_IPV6_MULTICAST_IF: return (IPV6_MULTICAST_IF); case LINUX_IPV6_MULTICAST_HOPS: return (IPV6_MULTICAST_HOPS); case LINUX_IPV6_MULTICAST_LOOP: return (IPV6_MULTICAST_LOOP); case LINUX_IPV6_ADD_MEMBERSHIP: return (IPV6_JOIN_GROUP); case LINUX_IPV6_DROP_MEMBERSHIP: return (IPV6_LEAVE_GROUP); case LINUX_IPV6_V6ONLY: return (IPV6_V6ONLY); case LINUX_IPV6_IPSEC_POLICY: /* we have 
this option, but not documented in ip6(4) manpage */ LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_IPSEC_POLICY"); return (IPV6_IPSEC_POLICY); case LINUX_MCAST_JOIN_GROUP: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_JOIN_GROUP"); return (IPV6_JOIN_GROUP); case LINUX_MCAST_LEAVE_GROUP: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_LEAVE_GROUP"); return (IPV6_LEAVE_GROUP); case LINUX_IPV6_RECVPKTINFO: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_RECVPKTINFO"); return (IPV6_RECVPKTINFO); case LINUX_IPV6_PKTINFO: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_PKTINFO"); return (IPV6_PKTINFO); case LINUX_IPV6_RECVHOPLIMIT: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_RECVHOPLIMIT"); return (IPV6_RECVHOPLIMIT); case LINUX_IPV6_HOPLIMIT: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_HOPLIMIT"); return (IPV6_HOPLIMIT); case LINUX_IPV6_RECVHOPOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_RECVHOPOPTS"); return (IPV6_RECVHOPOPTS); case LINUX_IPV6_HOPOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_HOPOPTS"); return (IPV6_HOPOPTS); case LINUX_IPV6_RTHDRDSTOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_RTHDRDSTOPTS"); return (IPV6_RTHDRDSTOPTS); case LINUX_IPV6_RECVRTHDR: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_RECVRTHDR"); return (IPV6_RECVRTHDR); case LINUX_IPV6_RTHDR: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_RTHDR"); return (IPV6_RTHDR); case LINUX_IPV6_RECVDSTOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_RECVDSTOPTS"); return (IPV6_RECVDSTOPTS); case LINUX_IPV6_DSTOPTS: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_DSTOPTS"); return (IPV6_DSTOPTS); case LINUX_IPV6_RECVPATHMTU: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_RECVPATHMTU"); return (IPV6_RECVPATHMTU); case LINUX_IPV6_PATHMTU: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_PATHMTU"); return (IPV6_PATHMTU); case LINUX_IPV6_DONTFRAG: return (IPV6_DONTFRAG); case LINUX_IPV6_AUTOFLOWLABEL: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_AUTOFLOWLABEL"); return (IPV6_AUTOFLOWLABEL); case LINUX_IPV6_ORIGDSTADDR: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_ORIGDSTADDR"); return (IPV6_ORIGDSTADDR); case LINUX_IPV6_FREEBIND: LINUX_RATELIMIT_MSG_NOTTESTED("IPv6 socket option IPV6_FREEBIND"); return (IPV6_BINDANY); /* known but not implemented sockopts */ case LINUX_IPV6_ADDRFORM: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_ADDRFORM (%d), you linux program can not convert the socket to IPv4", opt); return (-2); case LINUX_IPV6_AUTHHDR: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_AUTHHDR (%d), your linux program can not get the authentication header info of IPv6 packets", opt); return (-2); case LINUX_IPV6_FLOWINFO: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_FLOWINFO (%d), your linux program can not get the flowid of IPv6 packets", opt); return (-2); case LINUX_IPV6_ROUTER_ALERT: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_ROUTER_ALERT (%d), you can not do user-space routing from linux programs", opt); return (-2); case LINUX_IPV6_MTU_DISCOVER: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_MTU_DISCOVER (%d), your linux program can not control path-MTU discovery", opt); return (-2); case LINUX_IPV6_MTU: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_MTU (%d), your linux program can not control the MTU on this socket", opt); return (-2); case 
LINUX_IPV6_JOIN_ANYCAST: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_JOIN_ANYCAST (%d)", opt); return (-2); case LINUX_IPV6_LEAVE_ANYCAST: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_LEAVE_ANYCAST (%d)", opt); return (-2); case LINUX_IPV6_MULTICAST_ALL: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_MULTICAST_ALL (%d)", opt); return (-2); case LINUX_IPV6_ROUTER_ALERT_ISOLATE: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_ROUTER_ALERT_ISOLATE (%d)", opt); return (-2); case LINUX_IPV6_FLOWLABEL_MGR: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_FLOWLABEL_MGR (%d)", opt); return (-2); case LINUX_IPV6_FLOWINFO_SEND: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_FLOWINFO_SEND (%d)", opt); return (-2); case LINUX_IPV6_XFRM_POLICY: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_XFRM_POLICY (%d)", opt); return (-2); case LINUX_IPV6_HDRINCL: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_HDRINCL (%d)", opt); return (-2); case LINUX_MCAST_BLOCK_SOURCE: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option MCAST_BLOCK_SOURCE (%d), your linux program may see more multicast stuff than it wants", opt); return (-2); case LINUX_MCAST_UNBLOCK_SOURCE: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option MCAST_UNBLOCK_SOURCE (%d), your linux program may not see all the multicast stuff it wants", opt); return (-2); case LINUX_MCAST_JOIN_SOURCE_GROUP: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option MCAST_JOIN_SOURCE_GROUP (%d), your linux program is not able to join a multicast source group", opt); return (-2); case LINUX_MCAST_LEAVE_SOURCE_GROUP: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option MCAST_LEAVE_SOURCE_GROUP (%d), your linux program is not able to leave a multicast source group -- but it was also not able to join one, so no issue", opt); return (-2); case LINUX_MCAST_MSFILTER: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option MCAST_MSFILTER (%d), your linux program can not manipulate the multicast filter, it may see more multicast data than it wants to see", opt); return (-2); case LINUX_IPV6_ADDR_PREFERENCES: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_ADDR_PREFERENCES (%d)", opt); return (-2); case LINUX_IPV6_MINHOPCOUNT: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_MINHOPCOUNT (%d)", opt); return (-2); case LINUX_IPV6_TRANSPARENT: /* IP_BINDANY or more? 
*/ LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_TRANSPARENT (%d), you can not enable transparent proxying in linux programs -- note, IP_FREEBIND is supported, no idea if the FreeBSD IP_BINDANY is equivalent to the Linux IP_TRANSPARENT or not, any info is welcome", opt); return (-2); case LINUX_IPV6_UNICAST_IF: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_UNICAST_IF (%d)", opt); return (-2); case LINUX_IPV6_RECVFRAGSIZE: LINUX_RATELIMIT_MSG_OPT1( "unsupported IPv6 socket option IPV6_RECVFRAGSIZE (%d)", opt); return (-2); /* unknown sockopts */ default: return (-1); } } static int linux_to_bsd_so_sockopt(int opt) { switch (opt) { case LINUX_SO_DEBUG: return (SO_DEBUG); case LINUX_SO_REUSEADDR: return (SO_REUSEADDR); case LINUX_SO_TYPE: return (SO_TYPE); case LINUX_SO_ERROR: return (SO_ERROR); case LINUX_SO_DONTROUTE: return (SO_DONTROUTE); case LINUX_SO_BROADCAST: return (SO_BROADCAST); case LINUX_SO_SNDBUF: case LINUX_SO_SNDBUFFORCE: return (SO_SNDBUF); case LINUX_SO_RCVBUF: case LINUX_SO_RCVBUFFORCE: return (SO_RCVBUF); case LINUX_SO_KEEPALIVE: return (SO_KEEPALIVE); case LINUX_SO_OOBINLINE: return (SO_OOBINLINE); case LINUX_SO_LINGER: return (SO_LINGER); case LINUX_SO_REUSEPORT: return (SO_REUSEPORT_LB); case LINUX_SO_PASSCRED: return (LOCAL_CREDS_PERSISTENT); case LINUX_SO_PEERCRED: return (LOCAL_PEERCRED); case LINUX_SO_RCVLOWAT: return (SO_RCVLOWAT); case LINUX_SO_SNDLOWAT: return (SO_SNDLOWAT); case LINUX_SO_RCVTIMEO: return (SO_RCVTIMEO); case LINUX_SO_SNDTIMEO: return (SO_SNDTIMEO); case LINUX_SO_TIMESTAMPO: case LINUX_SO_TIMESTAMPN: return (SO_TIMESTAMP); case LINUX_SO_TIMESTAMPNSO: case LINUX_SO_TIMESTAMPNSN: return (SO_BINTIME); case LINUX_SO_ACCEPTCONN: return (SO_ACCEPTCONN); case LINUX_SO_PROTOCOL: return (SO_PROTOCOL); case LINUX_SO_DOMAIN: return (SO_DOMAIN); } return (-1); } static int linux_to_bsd_tcp_sockopt(int opt) { switch (opt) { case LINUX_TCP_NODELAY: return (TCP_NODELAY); case LINUX_TCP_MAXSEG: return (TCP_MAXSEG); case LINUX_TCP_CORK: return (TCP_NOPUSH); case LINUX_TCP_KEEPIDLE: return (TCP_KEEPIDLE); case LINUX_TCP_KEEPINTVL: return (TCP_KEEPINTVL); case LINUX_TCP_KEEPCNT: return (TCP_KEEPCNT); case LINUX_TCP_INFO: LINUX_RATELIMIT_MSG_OPT1( "unsupported TCP socket option TCP_INFO (%d)", opt); return (-2); case LINUX_TCP_MD5SIG: return (TCP_MD5SIG); } return (-1); } static int linux_to_bsd_msg_flags(int flags) { int ret_flags = 0; if (flags & LINUX_MSG_OOB) ret_flags |= MSG_OOB; if (flags & LINUX_MSG_PEEK) ret_flags |= MSG_PEEK; if (flags & LINUX_MSG_DONTROUTE) ret_flags |= MSG_DONTROUTE; if (flags & LINUX_MSG_CTRUNC) ret_flags |= MSG_CTRUNC; if (flags & LINUX_MSG_TRUNC) ret_flags |= MSG_TRUNC; if (flags & LINUX_MSG_DONTWAIT) ret_flags |= MSG_DONTWAIT; if (flags & LINUX_MSG_EOR) ret_flags |= MSG_EOR; if (flags & LINUX_MSG_WAITALL) ret_flags |= MSG_WAITALL; if (flags & LINUX_MSG_NOSIGNAL) ret_flags |= MSG_NOSIGNAL; if (flags & LINUX_MSG_PROXY) LINUX_RATELIMIT_MSG_OPT1("socket message flag MSG_PROXY (%d) not handled", LINUX_MSG_PROXY); if (flags & LINUX_MSG_FIN) LINUX_RATELIMIT_MSG_OPT1("socket message flag MSG_FIN (%d) not handled", LINUX_MSG_FIN); if (flags & LINUX_MSG_SYN) LINUX_RATELIMIT_MSG_OPT1("socket message flag MSG_SYN (%d) not handled", LINUX_MSG_SYN); if (flags & LINUX_MSG_CONFIRM) LINUX_RATELIMIT_MSG_OPT1("socket message flag MSG_CONFIRM (%d) not handled", LINUX_MSG_CONFIRM); if (flags & LINUX_MSG_RST) LINUX_RATELIMIT_MSG_OPT1("socket message flag MSG_RST (%d) not handled", LINUX_MSG_RST); if (flags & 
LINUX_MSG_ERRQUEUE) LINUX_RATELIMIT_MSG_OPT1("socket message flag MSG_ERRQUEUE (%d) not handled", LINUX_MSG_ERRQUEUE); return (ret_flags); } static int linux_to_bsd_cmsg_type(int cmsg_type) { switch (cmsg_type) { case LINUX_SCM_RIGHTS: return (SCM_RIGHTS); case LINUX_SCM_CREDENTIALS: return (SCM_CREDS); } return (-1); } static int bsd_to_linux_ip_cmsg_type(int cmsg_type) { switch (cmsg_type) { case IP_RECVORIGDSTADDR: return (LINUX_IP_RECVORIGDSTADDR); } return (-1); } static int bsd_to_linux_cmsg_type(struct proc *p, int cmsg_type, int cmsg_level) { struct linux_pemuldata *pem; if (cmsg_level == IPPROTO_IP) return (bsd_to_linux_ip_cmsg_type(cmsg_type)); if (cmsg_level != SOL_SOCKET) return (-1); pem = pem_find(p); switch (cmsg_type) { case SCM_RIGHTS: return (LINUX_SCM_RIGHTS); case SCM_CREDS: return (LINUX_SCM_CREDENTIALS); case SCM_CREDS2: return (LINUX_SCM_CREDENTIALS); case SCM_TIMESTAMP: return (pem->so_timestamp); case SCM_BINTIME: return (pem->so_timestampns); } return (-1); } static int linux_to_bsd_msghdr(struct msghdr *bhdr, const struct l_msghdr *lhdr) { if (lhdr->msg_controllen > INT_MAX) return (ENOBUFS); bhdr->msg_name = PTRIN(lhdr->msg_name); bhdr->msg_namelen = lhdr->msg_namelen; bhdr->msg_iov = PTRIN(lhdr->msg_iov); bhdr->msg_iovlen = lhdr->msg_iovlen; bhdr->msg_control = PTRIN(lhdr->msg_control); /* * msg_controllen is skipped since BSD and LINUX control messages * are potentially different sizes (e.g. the cred structure used * by SCM_CREDS is different between the two operating system). * * The caller can set it (if necessary) after converting all the * control messages. */ bhdr->msg_flags = linux_to_bsd_msg_flags(lhdr->msg_flags); return (0); } static int bsd_to_linux_msghdr(const struct msghdr *bhdr, struct l_msghdr *lhdr) { lhdr->msg_name = PTROUT(bhdr->msg_name); lhdr->msg_namelen = bhdr->msg_namelen; lhdr->msg_iov = PTROUT(bhdr->msg_iov); lhdr->msg_iovlen = bhdr->msg_iovlen; lhdr->msg_control = PTROUT(bhdr->msg_control); /* * msg_controllen is skipped since BSD and LINUX control messages * are potentially different sizes (e.g. the cred structure used * by SCM_CREDS is different between the two operating system). * * The caller can set it (if necessary) after converting all the * control messages. */ /* msg_flags skipped */ return (0); } static int linux_set_socket_flags(int lflags, int *flags) { if (lflags & ~(LINUX_SOCK_CLOEXEC | LINUX_SOCK_NONBLOCK)) return (EINVAL); if (lflags & LINUX_SOCK_NONBLOCK) *flags |= SOCK_NONBLOCK; if (lflags & LINUX_SOCK_CLOEXEC) *flags |= SOCK_CLOEXEC; return (0); } static int linux_copyout_sockaddr(const struct sockaddr *sa, void *uaddr, size_t len) { struct l_sockaddr *lsa; int error; error = bsd_to_linux_sockaddr(sa, &lsa, len); if (error != 0) return (error); error = copyout(lsa, uaddr, len); free(lsa, M_LINUX); return (error); } static int linux_sendit(struct thread *td, int s, struct msghdr *mp, int flags, struct mbuf *control, enum uio_seg segflg) { struct sockaddr *to; int error, len; if (mp->msg_name != NULL) { len = mp->msg_namelen; error = linux_to_bsd_sockaddr(mp->msg_name, &to, &len); if (error != 0) return (error); mp->msg_name = to; } else to = NULL; error = kern_sendit(td, s, mp, linux_to_bsd_msg_flags(flags), control, segflg); if (to) free(to, M_SONAME); return (error); } /* Return 0 if IP_HDRINCL is set for the given socket. 
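* Note the inverted convention: the function returns the value of (optval == 0), so a return of 0 means the option is set; on failure the errno from kern_getsockopt() is returned instead. linux_sendto() relies on this when deciding whether to take the linux_sendto_hdrincl() path.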
*/ static int linux_check_hdrincl(struct thread *td, int s) { int error, optval; socklen_t size_val; size_val = sizeof(optval); error = kern_getsockopt(td, s, IPPROTO_IP, IP_HDRINCL, &optval, UIO_SYSSPACE, &size_val); if (error != 0) return (error); return (optval == 0); } /* * Updated sendto() when IP_HDRINCL is set: * tweak endian-dependent fields in the IP packet. */ static int linux_sendto_hdrincl(struct thread *td, struct linux_sendto_args *linux_args) { /* * linux_ip_copysize defines how many bytes we should copy * from the beginning of the IP packet before we customize it for BSD. * It should include all the fields we modify (ip_len and ip_off). */ #define linux_ip_copysize 8 struct ip *packet; struct msghdr msg; struct iovec aiov[1]; int error; /* Check that the packet isn't too big or too small. */ if (linux_args->len < linux_ip_copysize || linux_args->len > IP_MAXPACKET) return (EINVAL); packet = (struct ip *)malloc(linux_args->len, M_LINUX, M_WAITOK); /* Make kernel copy of the packet to be sent */ if ((error = copyin(PTRIN(linux_args->msg), packet, linux_args->len))) goto goout; /* Convert fields from Linux to BSD raw IP socket format */ packet->ip_len = linux_args->len; packet->ip_off = ntohs(packet->ip_off); /* Prepare the msghdr and iovec structures describing the new packet */ msg.msg_name = PTRIN(linux_args->to); msg.msg_namelen = linux_args->tolen; msg.msg_iov = aiov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_flags = 0; aiov[0].iov_base = (char *)packet; aiov[0].iov_len = linux_args->len; error = linux_sendit(td, linux_args->s, &msg, linux_args->flags, NULL, UIO_SYSSPACE); goout: free(packet, M_LINUX); return (error); } static const char *linux_netlink_names[] = { [LINUX_NETLINK_ROUTE] = "ROUTE", [LINUX_NETLINK_SOCK_DIAG] = "SOCK_DIAG", [LINUX_NETLINK_NFLOG] = "NFLOG", [LINUX_NETLINK_SELINUX] = "SELINUX", [LINUX_NETLINK_AUDIT] = "AUDIT", [LINUX_NETLINK_FIB_LOOKUP] = "FIB_LOOKUP", [LINUX_NETLINK_NETFILTER] = "NETFILTER", [LINUX_NETLINK_KOBJECT_UEVENT] = "KOBJECT_UEVENT", }; int linux_socket(struct thread *td, struct linux_socket_args *args) { int domain, retval_socket, type; type = args->type & LINUX_SOCK_TYPE_MASK; if (type < 0 || type > LINUX_SOCK_MAX) return (EINVAL); retval_socket = linux_set_socket_flags(args->type & ~LINUX_SOCK_TYPE_MASK, &type); if (retval_socket != 0) return (retval_socket); domain = linux_to_bsd_domain(args->domain); if (domain == -1) { /* Mask off SOCK_NONBLOCK / CLOEXEC for error messages. */ type = args->type & LINUX_SOCK_TYPE_MASK; if (args->domain == LINUX_AF_NETLINK && args->protocol == LINUX_NETLINK_AUDIT) { ; /* Do nothing, quietly. */ } else if (args->domain == LINUX_AF_NETLINK) { const char *nl_name; if (args->protocol >= 0 && args->protocol < nitems(linux_netlink_names)) nl_name = linux_netlink_names[args->protocol]; else nl_name = NULL; if (nl_name != NULL) linux_msg(curthread, "unsupported socket(AF_NETLINK, %d, " "NETLINK_%s)", type, nl_name); else linux_msg(curthread, "unsupported socket(AF_NETLINK, %d, %d)", type, args->protocol); } else { linux_msg(curthread, "unsupported socket domain %d, " "type %d, protocol %d", args->domain, type, args->protocol); } return (EAFNOSUPPORT); } retval_socket = kern_socket(td, domain, type, args->protocol); if (retval_socket) return (retval_socket); if (type == SOCK_RAW && (args->protocol == IPPROTO_RAW || args->protocol == 0) && domain == PF_INET) { /* It's a raw IP socket: set the IP_HDRINCL option. 
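* This mirrors Linux raw(7) semantics, where IPPROTO_RAW implies IP_HDRINCL and the application supplies the complete IP header itself. Roughly, a Linux program would do: fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW); sendto(fd, pkt, pktlen, 0, ...); with pkt starting at a struct iphdr it filled in by hand; the remaining ip_len/ip_off byte-order differences are handled by linux_sendto_hdrincl() above.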
*/ int hdrincl; hdrincl = 1; /* We ignore any error returned by kern_setsockopt() */ kern_setsockopt(td, td->td_retval[0], IPPROTO_IP, IP_HDRINCL, &hdrincl, UIO_SYSSPACE, sizeof(hdrincl)); } #ifdef INET6 /* * Linux AF_INET6 socket has IPV6_V6ONLY setsockopt set to 0 by default * and some apps depend on this. So, set V6ONLY to 0 for Linux apps. * For simplicity we do this unconditionally of the net.inet6.ip6.v6only * sysctl value. */ if (domain == PF_INET6) { int v6only; v6only = 0; /* We ignore any error returned by setsockopt() */ kern_setsockopt(td, td->td_retval[0], IPPROTO_IPV6, IPV6_V6ONLY, &v6only, UIO_SYSSPACE, sizeof(v6only)); } #endif return (retval_socket); } int linux_bind(struct thread *td, struct linux_bind_args *args) { struct sockaddr *sa; int error; error = linux_to_bsd_sockaddr(PTRIN(args->name), &sa, &args->namelen); if (error != 0) return (error); error = kern_bindat(td, AT_FDCWD, args->s, sa); free(sa, M_SONAME); /* XXX */ if (error == EADDRNOTAVAIL && args->namelen != sizeof(struct sockaddr_in)) return (EINVAL); return (error); } int linux_connect(struct thread *td, struct linux_connect_args *args) { struct socket *so; struct sockaddr *sa; struct file *fp; int error; error = linux_to_bsd_sockaddr(PTRIN(args->name), &sa, &args->namelen); if (error != 0) return (error); error = kern_connectat(td, AT_FDCWD, args->s, sa); free(sa, M_SONAME); if (error != EISCONN) return (error); /* * Linux doesn't return EISCONN the first time it occurs, * when on a non-blocking socket. Instead it returns the * error getsockopt(SOL_SOCKET, SO_ERROR) would return on BSD. */ error = getsock(td, args->s, &cap_connect_rights, &fp); if (error != 0) return (error); error = EISCONN; so = fp->f_data; if (atomic_load_int(&fp->f_flag) & FNONBLOCK) { SOCK_LOCK(so); if (so->so_emuldata == 0) error = so->so_error; so->so_emuldata = (void *)1; SOCK_UNLOCK(so); } fdrop(fp, td); return (error); } int linux_listen(struct thread *td, struct linux_listen_args *args) { return (kern_listen(td, args->s, args->backlog)); } static int linux_accept_common(struct thread *td, int s, l_uintptr_t addr, l_uintptr_t namelen, int flags) { struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; struct file *fp, *fp1; struct socket *so; socklen_t len; int bflags, error, error1; bflags = 0; fp = NULL; error = linux_set_socket_flags(flags, &bflags); if (error != 0) return (error); if (PTRIN(addr) != NULL) { error = copyin(PTRIN(namelen), &len, sizeof(len)); if (error != 0) return (error); if (len < 0) return (EINVAL); } else len = 0; error = kern_accept4(td, s, (struct sockaddr *)&ss, bflags, &fp); /* * Translate errno values into ones used by Linux. */ if (error != 0) { /* * XXX. This is wrong, different sockaddr structures * have different sizes. 
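* For instance, struct sockaddr_in is 16 bytes while struct sockaddr_in6 is 28, so the namelen != sizeof(struct sockaddr_in) check below only really makes sense for AF_INET sockets.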
*/ switch (error) { case EFAULT: if (namelen != sizeof(struct sockaddr_in)) error = EINVAL; break; case EINVAL: error1 = getsock(td, s, &cap_accept_rights, &fp1); if (error1 != 0) { error = error1; break; } so = fp1->f_data; if (so->so_type == SOCK_DGRAM) error = EOPNOTSUPP; fdrop(fp1, td); break; } return (error); } if (PTRIN(addr) != NULL) { len = min(ss.ss_len, len); error = linux_copyout_sockaddr((struct sockaddr *)&ss, PTRIN(addr), len); if (error == 0) { len = ss.ss_len; error = copyout(&len, PTRIN(namelen), sizeof(len)); } if (error != 0) { fdclose(td, fp, td->td_retval[0]); td->td_retval[0] = 0; } } if (fp != NULL) fdrop(fp, td); return (error); } int linux_accept(struct thread *td, struct linux_accept_args *args) { return (linux_accept_common(td, args->s, args->addr, args->namelen, 0)); } int linux_accept4(struct thread *td, struct linux_accept4_args *args) { return (linux_accept_common(td, args->s, args->addr, args->namelen, args->flags)); } int linux_getsockname(struct thread *td, struct linux_getsockname_args *args) { - struct sockaddr *sa; - int len, error; + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; + socklen_t len; + int error; error = copyin(PTRIN(args->namelen), &len, sizeof(len)); if (error != 0) return (error); - error = kern_getsockname(td, args->s, &sa, &len); + error = kern_getsockname(td, args->s, (struct sockaddr *)&ss); if (error != 0) return (error); - if (len != 0) - error = linux_copyout_sockaddr(sa, PTRIN(args->addr), len); - - free(sa, M_SONAME); - if (error == 0) + len = min(ss.ss_len, len); + error = linux_copyout_sockaddr((struct sockaddr *)&ss, + PTRIN(args->addr), len); + if (error == 0) { + len = ss.ss_len; error = copyout(&len, PTRIN(args->namelen), sizeof(len)); + } return (error); } int linux_getpeername(struct thread *td, struct linux_getpeername_args *args) { - struct sockaddr *sa; - int len, error; + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; + socklen_t len; + int error; error = copyin(PTRIN(args->namelen), &len, sizeof(len)); if (error != 0) return (error); - if (len < 0) - return (EINVAL); - error = kern_getpeername(td, args->s, &sa, &len); + error = kern_getpeername(td, args->s, (struct sockaddr *)&ss); if (error != 0) return (error); - if (len != 0) - error = linux_copyout_sockaddr(sa, PTRIN(args->addr), len); - - free(sa, M_SONAME); - if (error == 0) + len = min(ss.ss_len, len); + error = linux_copyout_sockaddr((struct sockaddr *)&ss, + PTRIN(args->addr), len); + if (error == 0) { + len = ss.ss_len; error = copyout(&len, PTRIN(args->namelen), sizeof(len)); + } return (error); } int linux_socketpair(struct thread *td, struct linux_socketpair_args *args) { int domain, error, sv[2], type; domain = linux_to_bsd_domain(args->domain); if (domain != PF_LOCAL) return (EAFNOSUPPORT); type = args->type & LINUX_SOCK_TYPE_MASK; if (type < 0 || type > LINUX_SOCK_MAX) return (EINVAL); error = linux_set_socket_flags(args->type & ~LINUX_SOCK_TYPE_MASK, &type); if (error != 0) return (error); if (args->protocol != 0 && args->protocol != PF_UNIX) { /* * Use of PF_UNIX as protocol argument is not right, * but Linux does it. * Do not map PF_UNIX as its Linux value is identical * to FreeBSD one. 
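* Both systems define PF_UNIX as 1, so no translation is needed; any other non-zero protocol is rejected with EPROTONOSUPPORT below.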
*/ return (EPROTONOSUPPORT); } error = kern_socketpair(td, domain, type, 0, sv); if (error != 0) return (error); error = copyout(sv, PTRIN(args->rsv), 2 * sizeof(int)); if (error != 0) { (void)kern_close(td, sv[0]); (void)kern_close(td, sv[1]); } return (error); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) struct linux_send_args { register_t s; register_t msg; register_t len; register_t flags; }; static int linux_send(struct thread *td, struct linux_send_args *args) { struct sendto_args /* { int s; caddr_t buf; int len; int flags; caddr_t to; int tolen; } */ bsd_args; struct file *fp; int error; bsd_args.s = args->s; bsd_args.buf = (caddr_t)PTRIN(args->msg); bsd_args.len = args->len; bsd_args.flags = linux_to_bsd_msg_flags(args->flags); bsd_args.to = NULL; bsd_args.tolen = 0; error = sys_sendto(td, &bsd_args); if (error == ENOTCONN) { /* * Linux doesn't return ENOTCONN for non-blocking sockets. * Instead it returns the EAGAIN. */ error = getsock(td, args->s, &cap_send_rights, &fp); if (error == 0) { if (atomic_load_int(&fp->f_flag) & FNONBLOCK) error = EAGAIN; fdrop(fp, td); } } return (error); } struct linux_recv_args { register_t s; register_t msg; register_t len; register_t flags; }; static int linux_recv(struct thread *td, struct linux_recv_args *args) { struct recvfrom_args /* { int s; caddr_t buf; int len; int flags; struct sockaddr *from; socklen_t fromlenaddr; } */ bsd_args; bsd_args.s = args->s; bsd_args.buf = (caddr_t)PTRIN(args->msg); bsd_args.len = args->len; bsd_args.flags = linux_to_bsd_msg_flags(args->flags); bsd_args.from = NULL; bsd_args.fromlenaddr = 0; return (sys_recvfrom(td, &bsd_args)); } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ int linux_sendto(struct thread *td, struct linux_sendto_args *args) { struct msghdr msg; struct iovec aiov; struct socket *so; struct file *fp; int error; if (linux_check_hdrincl(td, args->s) == 0) /* IP_HDRINCL set, tweak the packet before sending */ return (linux_sendto_hdrincl(td, args)); bzero(&msg, sizeof(msg)); error = getsock(td, args->s, &cap_send_connect_rights, &fp); if (error != 0) return (error); so = fp->f_data; if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) { msg.msg_name = PTRIN(args->to); msg.msg_namelen = args->tolen; } msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = PTRIN(args->msg); aiov.iov_len = args->len; fdrop(fp, td); return (linux_sendit(td, args->s, &msg, args->flags, NULL, UIO_USERSPACE)); } int linux_recvfrom(struct thread *td, struct linux_recvfrom_args *args) { struct sockaddr *sa; struct msghdr msg; struct iovec aiov; int error, fromlen; if (PTRIN(args->fromlen) != NULL) { error = copyin(PTRIN(args->fromlen), &fromlen, sizeof(fromlen)); if (error != 0) return (error); if (fromlen < 0) return (EINVAL); fromlen = min(fromlen, SOCK_MAXADDRLEN); sa = malloc(fromlen, M_SONAME, M_WAITOK); } else { fromlen = 0; sa = NULL; } msg.msg_name = sa; msg.msg_namelen = fromlen; msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = PTRIN(args->buf); aiov.iov_len = args->len; msg.msg_control = 0; msg.msg_flags = linux_to_bsd_msg_flags(args->flags); error = kern_recvit(td, args->s, &msg, UIO_SYSSPACE, NULL); if (error != 0) goto out; /* * XXX. Seems that FreeBSD is different from Linux here. Linux * fill source address if underlying protocol provides it, while * FreeBSD fill it if underlying protocol is not connection-oriented. * So, kern_recvit() set msg.msg_namelen to 0 if protocol pr_flags * does not contains PR_ADDR flag. 
*/ if (PTRIN(args->from) != NULL && msg.msg_namelen != 0) error = linux_copyout_sockaddr(sa, PTRIN(args->from), msg.msg_namelen); if (error == 0 && PTRIN(args->fromlen) != NULL) error = copyout(&msg.msg_namelen, PTRIN(args->fromlen), sizeof(msg.msg_namelen)); out: free(sa, M_SONAME); return (error); } static int linux_sendmsg_common(struct thread *td, l_int s, struct l_msghdr *msghdr, l_uint flags) { + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; struct cmsghdr *cmsg; struct mbuf *control; struct msghdr msg; struct l_cmsghdr linux_cmsg; struct l_cmsghdr *ptr_cmsg; struct l_msghdr linux_msghdr; struct iovec *iov; socklen_t datalen; - struct sockaddr *sa; struct socket *so; sa_family_t sa_family; struct file *fp; void *data; l_size_t len; l_size_t clen; int error; error = copyin(msghdr, &linux_msghdr, sizeof(linux_msghdr)); if (error != 0) return (error); /* * Some Linux applications (ping) define a non-NULL control data * pointer, but a msg_controllen of 0, which is not allowed in the * FreeBSD system call interface. NULL the msg_control pointer in * order to handle this case. This should be checked, but allows the * Linux ping to work. */ if (PTRIN(linux_msghdr.msg_control) != NULL && linux_msghdr.msg_controllen == 0) linux_msghdr.msg_control = PTROUT(NULL); error = linux_to_bsd_msghdr(&msg, &linux_msghdr); if (error != 0) return (error); #ifdef COMPAT_LINUX32 error = freebsd32_copyiniov(PTRIN(msg.msg_iov), msg.msg_iovlen, &iov, EMSGSIZE); #else error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); #endif if (error != 0) return (error); control = NULL; - error = kern_getsockname(td, s, &sa, &datalen); + error = kern_getsockname(td, s, (struct sockaddr *)&ss); if (error != 0) goto bad; - sa_family = sa->sa_family; - free(sa, M_SONAME); + sa_family = ss.ss_family; if (flags & LINUX_MSG_OOB) { error = EOPNOTSUPP; if (sa_family == AF_UNIX) goto bad; error = getsock(td, s, &cap_send_rights, &fp); if (error != 0) goto bad; so = fp->f_data; if (so->so_type != SOCK_STREAM) error = EOPNOTSUPP; fdrop(fp, td); if (error != 0) goto bad; } if (linux_msghdr.msg_controllen >= sizeof(struct l_cmsghdr)) { error = ENOBUFS; control = m_get(M_WAITOK, MT_CONTROL); MCLGET(control, M_WAITOK); data = mtod(control, void *); datalen = 0; ptr_cmsg = PTRIN(linux_msghdr.msg_control); clen = linux_msghdr.msg_controllen; do { error = copyin(ptr_cmsg, &linux_cmsg, sizeof(struct l_cmsghdr)); if (error != 0) goto bad; error = EINVAL; if (linux_cmsg.cmsg_len < sizeof(struct l_cmsghdr) || linux_cmsg.cmsg_len > clen) goto bad; if (datalen + CMSG_HDRSZ > MCLBYTES) goto bad; /* * Now we support only SCM_RIGHTS and SCM_CRED, * so return EINVAL in any other cmsg_type */ cmsg = data; cmsg->cmsg_type = linux_to_bsd_cmsg_type(linux_cmsg.cmsg_type); cmsg->cmsg_level = linux_to_bsd_sockopt_level(linux_cmsg.cmsg_level); if (cmsg->cmsg_type == -1 || cmsg->cmsg_level != SOL_SOCKET) { linux_msg(curthread, "unsupported sendmsg cmsg level %d type %d", linux_cmsg.cmsg_level, linux_cmsg.cmsg_type); goto bad; } /* * Some applications (e.g. pulseaudio) attempt to * send ancillary data even if the underlying protocol * doesn't support it which is not allowed in the * FreeBSD system call interface. 
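* To keep such applications working, control messages on non-AF_UNIX sockets are silently dropped below (the sa_family check jumps to the next cmsg) instead of failing the whole sendmsg() call.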
*/ if (sa_family != AF_UNIX) goto next; if (cmsg->cmsg_type == SCM_CREDS) { len = sizeof(struct cmsgcred); if (datalen + CMSG_SPACE(len) > MCLBYTES) goto bad; /* * The lower levels will fill in the structure */ memset(CMSG_DATA(data), 0, len); } else { len = linux_cmsg.cmsg_len - L_CMSG_HDRSZ; if (datalen + CMSG_SPACE(len) < datalen || datalen + CMSG_SPACE(len) > MCLBYTES) goto bad; error = copyin(LINUX_CMSG_DATA(ptr_cmsg), CMSG_DATA(data), len); if (error != 0) goto bad; } cmsg->cmsg_len = CMSG_LEN(len); data = (char *)data + CMSG_SPACE(len); datalen += CMSG_SPACE(len); next: if (clen <= LINUX_CMSG_ALIGN(linux_cmsg.cmsg_len)) break; clen -= LINUX_CMSG_ALIGN(linux_cmsg.cmsg_len); ptr_cmsg = (struct l_cmsghdr *)((char *)ptr_cmsg + LINUX_CMSG_ALIGN(linux_cmsg.cmsg_len)); } while(clen >= sizeof(struct l_cmsghdr)); control->m_len = datalen; if (datalen == 0) { m_freem(control); control = NULL; } } msg.msg_iov = iov; msg.msg_flags = 0; error = linux_sendit(td, s, &msg, flags, control, UIO_USERSPACE); control = NULL; bad: m_freem(control); free(iov, M_IOV); return (error); } int linux_sendmsg(struct thread *td, struct linux_sendmsg_args *args) { return (linux_sendmsg_common(td, args->s, PTRIN(args->msg), args->flags)); } int linux_sendmmsg(struct thread *td, struct linux_sendmmsg_args *args) { struct l_mmsghdr *msg; l_uint retval; int error, datagrams; if (args->vlen > UIO_MAXIOV) args->vlen = UIO_MAXIOV; msg = PTRIN(args->msg); datagrams = 0; while (datagrams < args->vlen) { error = linux_sendmsg_common(td, args->s, &msg->msg_hdr, args->flags); if (error != 0) break; retval = td->td_retval[0]; error = copyout(&retval, &msg->msg_len, sizeof(msg->msg_len)); if (error != 0) break; ++msg; ++datagrams; } if (error == 0) td->td_retval[0] = datagrams; return (error); } static int recvmsg_scm_rights(struct thread *td, l_uint flags, socklen_t *datalen, void **data, void **udata) { int i, fd, fds, *fdp; if (flags & LINUX_MSG_CMSG_CLOEXEC) { fds = *datalen / sizeof(int); fdp = *data; for (i = 0; i < fds; i++) { fd = *fdp++; (void)kern_fcntl(td, fd, F_SETFD, FD_CLOEXEC); } } return (0); } static int recvmsg_scm_creds(socklen_t *datalen, void **data, void **udata) { struct cmsgcred *cmcred; struct l_ucred lu; cmcred = *data; lu.pid = cmcred->cmcred_pid; lu.uid = cmcred->cmcred_uid; lu.gid = cmcred->cmcred_gid; memmove(*data, &lu, sizeof(lu)); *datalen = sizeof(lu); return (0); } _Static_assert(sizeof(struct cmsgcred) >= sizeof(struct l_ucred), "scm_creds sizeof l_ucred"); static int recvmsg_scm_creds2(socklen_t *datalen, void **data, void **udata) { struct sockcred2 *scred; struct l_ucred lu; scred = *data; lu.pid = scred->sc_pid; lu.uid = scred->sc_uid; lu.gid = scred->sc_gid; memmove(*data, &lu, sizeof(lu)); *datalen = sizeof(lu); return (0); } _Static_assert(sizeof(struct sockcred2) >= sizeof(struct l_ucred), "scm_creds2 sizeof l_ucred"); #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) static int recvmsg_scm_timestamp(l_int msg_type, socklen_t *datalen, void **data, void **udata) { l_sock_timeval ltv64; l_timeval ltv; struct timeval *tv; socklen_t len; void *buf; if (*datalen != sizeof(struct timeval)) return (EMSGSIZE); tv = *data; #if defined(COMPAT_LINUX32) if (msg_type == LINUX_SCM_TIMESTAMPO && (tv->tv_sec > INT_MAX || tv->tv_sec < INT_MIN)) return (EOVERFLOW); #endif if (msg_type == LINUX_SCM_TIMESTAMPN) len = sizeof(ltv64); else len = sizeof(ltv); buf = malloc(len, M_LINUX, M_WAITOK); if (msg_type == LINUX_SCM_TIMESTAMPN) { ltv64.tv_sec = tv->tv_sec; ltv64.tv_usec = 
tv->tv_usec; memmove(buf, &ltv64, len); } else { ltv.tv_sec = tv->tv_sec; ltv.tv_usec = tv->tv_usec; memmove(buf, &ltv, len); } *data = *udata = buf; *datalen = len; return (0); } #else _Static_assert(sizeof(struct timeval) == sizeof(l_timeval), "scm_timestamp sizeof l_timeval"); #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) static int recvmsg_scm_timestampns(l_int msg_type, socklen_t *datalen, void **data, void **udata) { struct l_timespec64 ts64; struct l_timespec ts32; struct timespec ts; socklen_t len; void *buf; if (msg_type == LINUX_SCM_TIMESTAMPNSO) len = sizeof(ts32); else len = sizeof(ts64); buf = malloc(len, M_LINUX, M_WAITOK); bintime2timespec(*data, &ts); if (msg_type == LINUX_SCM_TIMESTAMPNSO) { ts32.tv_sec = ts.tv_sec; ts32.tv_nsec = ts.tv_nsec; memmove(buf, &ts32, len); } else { ts64.tv_sec = ts.tv_sec; ts64.tv_nsec = ts.tv_nsec; memmove(buf, &ts64, len); } *data = *udata = buf; *datalen = len; return (0); } #else static int recvmsg_scm_timestampns(l_int msg_type, socklen_t *datalen, void **data, void **udata) { struct timespec ts; bintime2timespec(*data, &ts); memmove(*data, &ts, sizeof(struct timespec)); *datalen = sizeof(struct timespec); return (0); } _Static_assert(sizeof(struct bintime) >= sizeof(struct timespec), "scm_timestampns sizeof timespec"); #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ static int recvmsg_scm_sol_socket(struct thread *td, l_int msg_type, l_int lmsg_type, l_uint flags, socklen_t *datalen, void **data, void **udata) { int error; error = 0; switch (msg_type) { case SCM_RIGHTS: error = recvmsg_scm_rights(td, flags, datalen, data, udata); break; case SCM_CREDS: error = recvmsg_scm_creds(datalen, data, udata); break; case SCM_CREDS2: error = recvmsg_scm_creds2(datalen, data, udata); break; case SCM_TIMESTAMP: #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) error = recvmsg_scm_timestamp(lmsg_type, datalen, data, udata); #endif break; case SCM_BINTIME: error = recvmsg_scm_timestampns(lmsg_type, datalen, data, udata); break; } return (error); } static int recvmsg_scm_ip_origdstaddr(socklen_t *datalen, void **data, void **udata) { struct l_sockaddr *lsa; int error; error = bsd_to_linux_sockaddr(*data, &lsa, *datalen); if (error == 0) { *data = *udata = lsa; *datalen = sizeof(*lsa); } return (error); } static int recvmsg_scm_ipproto_ip(l_int msg_type, l_int lmsg_type, socklen_t *datalen, void **data, void **udata) { int error; error = 0; switch (msg_type) { case IP_ORIGDSTADDR: error = recvmsg_scm_ip_origdstaddr(datalen, data, udata); break; } return (error); } static int linux_recvmsg_common(struct thread *td, l_int s, struct l_msghdr *msghdr, l_uint flags, struct msghdr *msg) { struct proc *p = td->td_proc; struct cmsghdr *cm; struct l_cmsghdr *lcm = NULL; socklen_t datalen, maxlen, outlen; struct l_msghdr l_msghdr; struct iovec *iov, *uiov; struct mbuf *m, *control = NULL; struct mbuf **controlp; struct sockaddr *sa; caddr_t outbuf; void *data, *udata; int error, skiped; error = copyin(msghdr, &l_msghdr, sizeof(l_msghdr)); if (error != 0) return (error); /* * Pass user-supplied recvmsg() flags in msg_flags field, * following sys_recvmsg() convention. 
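* linux_to_bsd_msghdr() then runs them through linux_to_bsd_msg_flags(), so Linux flag values (e.g. MSG_DONTWAIT, 0x40 on Linux vs. 0x80 natively) reach kern_recvit() in their native form.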
*/ l_msghdr.msg_flags = flags; error = linux_to_bsd_msghdr(msg, &l_msghdr); if (error != 0) return (error); #ifdef COMPAT_LINUX32 error = freebsd32_copyiniov(PTRIN(msg->msg_iov), msg->msg_iovlen, &iov, EMSGSIZE); #else error = copyiniov(msg->msg_iov, msg->msg_iovlen, &iov, EMSGSIZE); #endif if (error != 0) return (error); if (msg->msg_name != NULL && msg->msg_namelen > 0) { msg->msg_namelen = min(msg->msg_namelen, SOCK_MAXADDRLEN); sa = malloc(msg->msg_namelen, M_SONAME, M_WAITOK); msg->msg_name = sa; } else { sa = NULL; msg->msg_name = NULL; } uiov = msg->msg_iov; msg->msg_iov = iov; controlp = (msg->msg_control != NULL) ? &control : NULL; error = kern_recvit(td, s, msg, UIO_SYSSPACE, controlp); msg->msg_iov = uiov; if (error != 0) goto bad; /* * Note that kern_recvit() updates msg->msg_namelen. */ if (msg->msg_name != NULL && msg->msg_namelen > 0) { msg->msg_name = PTRIN(l_msghdr.msg_name); error = linux_copyout_sockaddr(sa, msg->msg_name, msg->msg_namelen); if (error != 0) goto bad; } error = bsd_to_linux_msghdr(msg, &l_msghdr); if (error != 0) goto bad; skiped = outlen = 0; maxlen = l_msghdr.msg_controllen; if (control == NULL) goto out; lcm = malloc(L_CMSG_HDRSZ, M_LINUX, M_WAITOK | M_ZERO); msg->msg_control = mtod(control, struct cmsghdr *); msg->msg_controllen = control->m_len; outbuf = PTRIN(l_msghdr.msg_control); for (m = control; m != NULL; m = m->m_next) { cm = mtod(m, struct cmsghdr *); lcm->cmsg_type = bsd_to_linux_cmsg_type(p, cm->cmsg_type, cm->cmsg_level); lcm->cmsg_level = bsd_to_linux_sockopt_level(cm->cmsg_level); if (lcm->cmsg_type == -1 || cm->cmsg_level == -1) { LINUX_RATELIMIT_MSG_OPT2( "unsupported recvmsg cmsg level %d type %d", cm->cmsg_level, cm->cmsg_type); /* Skip unsupported messages */ skiped++; continue; } data = CMSG_DATA(cm); datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; udata = NULL; error = 0; switch (cm->cmsg_level) { case IPPROTO_IP: error = recvmsg_scm_ipproto_ip(cm->cmsg_type, lcm->cmsg_type, &datalen, &data, &udata); break; case SOL_SOCKET: error = recvmsg_scm_sol_socket(td, cm->cmsg_type, lcm->cmsg_type, flags, &datalen, &data, &udata); break; } /* The recvmsg_scm_ is responsible to free udata on error. 
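* On success a handler may instead hand back a freshly allocated buffer through *udata (pointing *data at it as well); the loop below copies it out and then frees the buffer with the M_LINUX malloc type in either case.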
*/ if (error != 0) goto bad; if (outlen + LINUX_CMSG_LEN(datalen) > maxlen) { if (outlen == 0) { error = EMSGSIZE; goto err; } else { l_msghdr.msg_flags |= LINUX_MSG_CTRUNC; m_dispose_extcontrolm(control); free(udata, M_LINUX); goto out; } } lcm->cmsg_len = LINUX_CMSG_LEN(datalen); error = copyout(lcm, outbuf, L_CMSG_HDRSZ); if (error == 0) { error = copyout(data, LINUX_CMSG_DATA(outbuf), datalen); if (error == 0) { outbuf += LINUX_CMSG_SPACE(datalen); outlen += LINUX_CMSG_SPACE(datalen); } } err: free(udata, M_LINUX); if (error != 0) goto bad; } if (outlen == 0 && skiped > 0) { error = EINVAL; goto bad; } out: l_msghdr.msg_controllen = outlen; error = copyout(&l_msghdr, msghdr, sizeof(l_msghdr)); bad: if (control != NULL) { if (error != 0) m_dispose_extcontrolm(control); m_freem(control); } free(iov, M_IOV); free(lcm, M_LINUX); free(sa, M_SONAME); return (error); } int linux_recvmsg(struct thread *td, struct linux_recvmsg_args *args) { struct msghdr bsd_msg; struct file *fp; int error; error = getsock(td, args->s, &cap_recv_rights, &fp); if (error != 0) return (error); fdrop(fp, td); return (linux_recvmsg_common(td, args->s, PTRIN(args->msg), args->flags, &bsd_msg)); } static int linux_recvmmsg_common(struct thread *td, l_int s, struct l_mmsghdr *msg, l_uint vlen, l_uint flags, struct timespec *tts) { struct msghdr bsd_msg; struct timespec ts; struct file *fp; l_uint retval; int error, datagrams; error = getsock(td, s, &cap_recv_rights, &fp); if (error != 0) return (error); datagrams = 0; while (datagrams < vlen) { error = linux_recvmsg_common(td, s, &msg->msg_hdr, flags & ~LINUX_MSG_WAITFORONE, &bsd_msg); if (error != 0) break; retval = td->td_retval[0]; error = copyout(&retval, &msg->msg_len, sizeof(msg->msg_len)); if (error != 0) break; ++msg; ++datagrams; /* * MSG_WAITFORONE turns on MSG_DONTWAIT after one packet. */ if (flags & LINUX_MSG_WAITFORONE) flags |= LINUX_MSG_DONTWAIT; /* * See BUGS section of recvmmsg(2). */ if (tts) { getnanotime(&ts); timespecsub(&ts, tts, &ts); if (!timespecisset(&ts) || ts.tv_sec > 0) break; } /* Out of band data, return right away. 
*/ if (bsd_msg.msg_flags & MSG_OOB) break; } if (error == 0) td->td_retval[0] = datagrams; fdrop(fp, td); return (error); } int linux_recvmmsg(struct thread *td, struct linux_recvmmsg_args *args) { struct timespec ts, tts, *ptts; int error; if (args->timeout) { error = linux_get_timespec(&ts, args->timeout); if (error != 0) return (error); getnanotime(&tts); timespecadd(&tts, &ts, &tts); ptts = &tts; } else ptts = NULL; return (linux_recvmmsg_common(td, args->s, PTRIN(args->msg), args->vlen, args->flags, ptts)); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_recvmmsg_time64(struct thread *td, struct linux_recvmmsg_time64_args *args) { struct timespec ts, tts, *ptts; int error; if (args->timeout) { error = linux_get_timespec64(&ts, args->timeout); if (error != 0) return (error); getnanotime(&tts); timespecadd(&tts, &ts, &tts); ptts = &tts; } else ptts = NULL; return (linux_recvmmsg_common(td, args->s, PTRIN(args->msg), args->vlen, args->flags, ptts)); } #endif int linux_shutdown(struct thread *td, struct linux_shutdown_args *args) { return (kern_shutdown(td, args->s, args->how)); } int linux_setsockopt(struct thread *td, struct linux_setsockopt_args *args) { struct proc *p = td->td_proc; struct linux_pemuldata *pem; l_timeval linux_tv; struct sockaddr *sa; struct timeval tv; socklen_t len; int error, level, name, val; level = linux_to_bsd_sockopt_level(args->level); switch (level) { case SOL_SOCKET: name = linux_to_bsd_so_sockopt(args->optname); switch (name) { case LOCAL_CREDS_PERSISTENT: level = SOL_LOCAL; break; case SO_RCVTIMEO: /* FALLTHROUGH */ case SO_SNDTIMEO: error = copyin(PTRIN(args->optval), &linux_tv, sizeof(linux_tv)); if (error != 0) return (error); tv.tv_sec = linux_tv.tv_sec; tv.tv_usec = linux_tv.tv_usec; return (kern_setsockopt(td, args->s, level, name, &tv, UIO_SYSSPACE, sizeof(tv))); /* NOTREACHED */ case SO_TIMESTAMP: /* overwrite SO_BINTIME */ val = 0; error = kern_setsockopt(td, args->s, level, SO_BINTIME, &val, UIO_SYSSPACE, sizeof(val)); if (error != 0) return (error); pem = pem_find(p); pem->so_timestamp = args->optname; break; case SO_BINTIME: /* overwrite SO_TIMESTAMP */ val = 0; error = kern_setsockopt(td, args->s, level, SO_TIMESTAMP, &val, UIO_SYSSPACE, sizeof(val)); if (error != 0) return (error); pem = pem_find(p); pem->so_timestampns = args->optname; break; default: break; } break; case IPPROTO_IP: if (args->optname == LINUX_IP_RECVERR && linux_ignore_ip_recverr) { /* * XXX: This is a hack to unbreak DNS resolution * with glibc 2.30 and above. 
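* The glibc 2.30+ stub resolver apparently sets IP_RECVERR on its UDP sockets and gives up if the setsockopt() fails; when the linux_ignore_ip_recverr knob is enabled we claim success here, at the cost of never delivering the extended error queue.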
*/ return (0); } name = linux_to_bsd_ip_sockopt(args->optname); break; case IPPROTO_IPV6: name = linux_to_bsd_ip6_sockopt(args->optname); break; case IPPROTO_TCP: name = linux_to_bsd_tcp_sockopt(args->optname); break; case SOL_NETLINK: level = SOL_SOCKET; name = args->optname; break; default: name = -1; break; } if (name < 0) { if (name == -1) linux_msg(curthread, "unsupported setsockopt level %d optname %d", args->level, args->optname); return (ENOPROTOOPT); } if (name == IPV6_NEXTHOP) { len = args->optlen; error = linux_to_bsd_sockaddr(PTRIN(args->optval), &sa, &len); if (error != 0) return (error); error = kern_setsockopt(td, args->s, level, name, sa, UIO_SYSSPACE, len); free(sa, M_SONAME); } else { error = kern_setsockopt(td, args->s, level, name, PTRIN(args->optval), UIO_USERSPACE, args->optlen); } return (error); } static int linux_sockopt_copyout(struct thread *td, void *val, socklen_t len, struct linux_getsockopt_args *args) { int error; error = copyout(val, PTRIN(args->optval), len); if (error == 0) error = copyout(&len, PTRIN(args->optlen), sizeof(len)); return (error); } static int linux_getsockopt_so_peergroups(struct thread *td, struct linux_getsockopt_args *args) { struct xucred xu; socklen_t xulen, len; int error, i; xulen = sizeof(xu); error = kern_getsockopt(td, args->s, 0, LOCAL_PEERCRED, &xu, UIO_SYSSPACE, &xulen); if (error != 0) return (error); len = xu.cr_ngroups * sizeof(l_gid_t); if (args->optlen < len) { error = copyout(&len, PTRIN(args->optlen), sizeof(len)); if (error == 0) error = ERANGE; return (error); } /* * "- 1" to skip the primary group. */ for (i = 0; i < xu.cr_ngroups - 1; i++) { error = copyout(xu.cr_groups + i + 1, (void *)(args->optval + i * sizeof(l_gid_t)), sizeof(l_gid_t)); if (error != 0) return (error); } error = copyout(&len, PTRIN(args->optlen), sizeof(len)); return (error); } static int linux_getsockopt_so_peersec(struct thread *td, struct linux_getsockopt_args *args) { socklen_t len; int error; len = sizeof(SECURITY_CONTEXT_STRING); if (args->optlen < len) { error = copyout(&len, PTRIN(args->optlen), sizeof(len)); if (error == 0) error = ERANGE; return (error); } return (linux_sockopt_copyout(td, SECURITY_CONTEXT_STRING, len, args)); } static int linux_getsockopt_so_linger(struct thread *td, struct linux_getsockopt_args *args) { struct linger ling; socklen_t len; int error; len = sizeof(ling); error = kern_getsockopt(td, args->s, SOL_SOCKET, SO_LINGER, &ling, UIO_SYSSPACE, &len); if (error != 0) return (error); ling.l_onoff = ((ling.l_onoff & SO_LINGER) != 0); return (linux_sockopt_copyout(td, &ling, len, args)); } int linux_getsockopt(struct thread *td, struct linux_getsockopt_args *args) { l_timeval linux_tv; struct timeval tv; socklen_t tv_len, xulen, len; struct sockaddr *sa; struct xucred xu; struct l_ucred lxu; int error, level, name, newval; level = linux_to_bsd_sockopt_level(args->level); switch (level) { case SOL_SOCKET: switch (args->optname) { case LINUX_SO_PEERGROUPS: return (linux_getsockopt_so_peergroups(td, args)); case LINUX_SO_PEERSEC: return (linux_getsockopt_so_peersec(td, args)); default: break; } name = linux_to_bsd_so_sockopt(args->optname); switch (name) { case LOCAL_CREDS_PERSISTENT: level = SOL_LOCAL; break; case SO_RCVTIMEO: /* FALLTHROUGH */ case SO_SNDTIMEO: tv_len = sizeof(tv); error = kern_getsockopt(td, args->s, level, name, &tv, UIO_SYSSPACE, &tv_len); if (error != 0) return (error); linux_tv.tv_sec = tv.tv_sec; linux_tv.tv_usec = tv.tv_usec; return (linux_sockopt_copyout(td, &linux_tv, sizeof(linux_tv), 
args)); /* NOTREACHED */ case LOCAL_PEERCRED: if (args->optlen < sizeof(lxu)) return (EINVAL); /* * LOCAL_PEERCRED is not served at the SOL_SOCKET level, * but by the Unix socket's level 0. */ level = 0; xulen = sizeof(xu); error = kern_getsockopt(td, args->s, level, name, &xu, UIO_SYSSPACE, &xulen); if (error != 0) return (error); lxu.pid = xu.cr_pid; lxu.uid = xu.cr_uid; lxu.gid = xu.cr_gid; return (linux_sockopt_copyout(td, &lxu, sizeof(lxu), args)); /* NOTREACHED */ case SO_ERROR: len = sizeof(newval); error = kern_getsockopt(td, args->s, level, name, &newval, UIO_SYSSPACE, &len); if (error != 0) return (error); newval = -bsd_to_linux_errno(newval); return (linux_sockopt_copyout(td, &newval, len, args)); /* NOTREACHED */ case SO_DOMAIN: len = sizeof(newval); error = kern_getsockopt(td, args->s, level, name, &newval, UIO_SYSSPACE, &len); if (error != 0) return (error); newval = bsd_to_linux_domain(newval); if (newval == -1) return (ENOPROTOOPT); return (linux_sockopt_copyout(td, &newval, len, args)); /* NOTREACHED */ case SO_LINGER: return (linux_getsockopt_so_linger(td, args)); /* NOTREACHED */ default: break; } break; case IPPROTO_IP: name = linux_to_bsd_ip_sockopt(args->optname); break; case IPPROTO_IPV6: name = linux_to_bsd_ip6_sockopt(args->optname); break; case IPPROTO_TCP: name = linux_to_bsd_tcp_sockopt(args->optname); break; default: name = -1; break; } if (name < 0) { if (name == -1) linux_msg(curthread, "unsupported getsockopt level %d optname %d", args->level, args->optname); return (EINVAL); } if (name == IPV6_NEXTHOP) { error = copyin(PTRIN(args->optlen), &len, sizeof(len)); if (error != 0) return (error); sa = malloc(len, M_SONAME, M_WAITOK); error = kern_getsockopt(td, args->s, level, name, sa, UIO_SYSSPACE, &len); if (error != 0) goto out; error = linux_copyout_sockaddr(sa, PTRIN(args->optval), len); if (error == 0) error = copyout(&len, PTRIN(args->optlen), sizeof(len)); out: free(sa, M_SONAME); } else { if (args->optval) { error = copyin(PTRIN(args->optlen), &len, sizeof(len)); if (error != 0) return (error); } error = kern_getsockopt(td, args->s, level, name, PTRIN(args->optval), UIO_USERSPACE, &len); if (error == 0) error = copyout(&len, PTRIN(args->optlen), sizeof(len)); } return (error); } /* * Based on sendfile_getsock from kern_sendfile.c * Determines whether an fd is a stream socket that can be used * with FreeBSD sendfile. */ static bool is_sendfile(struct file *fp, struct file *ofp) { struct socket *so; /* * FreeBSD sendfile() system call sends a regular file or * shared memory object out a stream socket. */ if ((fp->f_type != DTYPE_SHM && fp->f_type != DTYPE_VNODE) || (fp->f_type == DTYPE_VNODE && (fp->f_vnode == NULL || fp->f_vnode->v_type != VREG))) return (false); /* * The socket must be a stream socket and connected. */ if (ofp->f_type != DTYPE_SOCKET) return (false); so = ofp->f_data; if (so->so_type != SOCK_STREAM) return (false); /* * SCTP one-to-one style sockets currently don't work with * sendfile(). 
*/ if (so->so_proto->pr_protocol == IPPROTO_SCTP) return (false); return (!SOLISTENING(so)); } static bool is_regular_file(struct file *fp) { return (fp->f_type == DTYPE_VNODE && fp->f_vnode != NULL && fp->f_vnode->v_type == VREG); } static int sendfile_fallback(struct thread *td, struct file *fp, l_int out, off_t *offset, l_size_t count, off_t *sbytes) { off_t current_offset, out_offset, to_send; l_size_t bytes_sent, n_read; struct file *ofp; struct iovec aiov; struct uio auio; bool seekable; size_t bufsz; void *buf; int flags, error; if (offset == NULL) { if ((error = fo_seek(fp, 0, SEEK_CUR, td)) != 0) return (error); current_offset = td->td_uretoff.tdu_off; } else { if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) return (ESPIPE); current_offset = *offset; } error = fget_write(td, out, &cap_pwrite_rights, &ofp); if (error != 0) return (error); seekable = (ofp->f_ops->fo_flags & DFLAG_SEEKABLE) != 0; if (seekable) { if ((error = fo_seek(ofp, 0, SEEK_CUR, td)) != 0) goto drop; out_offset = td->td_uretoff.tdu_off; } else out_offset = 0; flags = FOF_OFFSET | FOF_NOUPDATE; bufsz = min(count, MAXPHYS); buf = malloc(bufsz, M_LINUX, M_WAITOK); bytes_sent = 0; while (bytes_sent < count) { to_send = min(count - bytes_sent, bufsz); aiov.iov_base = buf; aiov.iov_len = bufsz; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_rw = UIO_READ; auio.uio_offset = current_offset; auio.uio_resid = to_send; error = fo_read(fp, &auio, fp->f_cred, flags, td); if (error != 0) break; n_read = to_send - auio.uio_resid; if (n_read == 0) break; aiov.iov_base = buf; aiov.iov_len = bufsz; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_rw = UIO_WRITE; auio.uio_offset = (seekable) ? out_offset : 0; auio.uio_resid = n_read; error = fo_write(ofp, &auio, ofp->f_cred, flags, td); if (error != 0) break; bytes_sent += n_read; current_offset += n_read; out_offset += n_read; } free(buf, M_LINUX); if (error == 0) { *sbytes = bytes_sent; if (offset != NULL) *offset = current_offset; else error = fo_seek(fp, current_offset, SEEK_SET, td); } if (error == 0 && seekable) error = fo_seek(ofp, out_offset, SEEK_SET, td); drop: fdrop(ofp, td); return (error); } static int sendfile_sendfile(struct thread *td, struct file *fp, l_int out, off_t *offset, l_size_t count, off_t *sbytes) { off_t current_offset; int error; if (offset == NULL) { if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) return (ESPIPE); if ((error = fo_seek(fp, 0, SEEK_CUR, td)) != 0) return (error); current_offset = td->td_uretoff.tdu_off; } else current_offset = *offset; error = fo_sendfile(fp, out, NULL, NULL, current_offset, count, sbytes, 0, td); if (error == 0) { current_offset += *sbytes; if (offset != NULL) *offset = current_offset; else error = fo_seek(fp, current_offset, SEEK_SET, td); } return (error); } static int linux_sendfile_common(struct thread *td, l_int out, l_int in, off_t *offset, l_size_t count) { struct file *fp, *ofp; off_t sbytes; int error; /* Linux cannot have 0 count. 
*/ if (count <= 0 || (offset != NULL && *offset < 0)) return (EINVAL); AUDIT_ARG_FD(in); error = fget_read(td, in, &cap_pread_rights, &fp); if (error != 0) return (error); if ((fp->f_type != DTYPE_SHM && fp->f_type != DTYPE_VNODE) || (fp->f_type == DTYPE_VNODE && (fp->f_vnode == NULL || fp->f_vnode->v_type != VREG))) { error = EINVAL; goto drop; } error = fget_unlocked(td, out, &cap_no_rights, &ofp); if (error != 0) goto drop; if (is_regular_file(fp) && is_regular_file(ofp)) { error = kern_copy_file_range(td, in, offset, out, NULL, count, 0); } else { sbytes = 0; if (is_sendfile(fp, ofp)) error = sendfile_sendfile(td, fp, out, offset, count, &sbytes); else error = sendfile_fallback(td, fp, out, offset, count, &sbytes); if (error == ENOBUFS && (ofp->f_flag & FNONBLOCK) != 0) error = EAGAIN; if (error == 0) td->td_retval[0] = sbytes; } fdrop(ofp, td); drop: fdrop(fp, td); return (error); } int linux_sendfile(struct thread *td, struct linux_sendfile_args *arg) { /* * Differences between FreeBSD and Linux sendfile: * - Linux doesn't send anything when count is 0 (FreeBSD uses 0 to * mean send the whole file). * - Linux can send to any fd whereas FreeBSD only supports sockets. * We therefore use FreeBSD sendfile where possible for performance, * but fall back on a manual copy (sendfile_fallback). * - Linux doesn't have an equivalent for FreeBSD's flags and sf_hdtr. * - Linux takes an offset pointer and updates it to the read location. * FreeBSD takes in an offset and a 'bytes read' parameter which is * only filled if it isn't NULL. We use this parameter to update the * offset pointer if it exists. * - Linux sendfile returns bytes read on success while FreeBSD * returns 0. We use the 'bytes read' parameter to get this value. */ off_t offset64; l_off_t offset; int error; if (arg->offset != NULL) { error = copyin(arg->offset, &offset, sizeof(offset)); if (error != 0) return (error); offset64 = offset; } error = linux_sendfile_common(td, arg->out, arg->in, arg->offset != NULL ? &offset64 : NULL, arg->count); if (error == 0 && arg->offset != NULL) { #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) if (offset64 > INT32_MAX) return (EOVERFLOW); #endif offset = (l_off_t)offset64; error = copyout(&offset, arg->offset, sizeof(offset)); } return (error); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_sendfile64(struct thread *td, struct linux_sendfile64_args *arg) { off_t offset; int error; if (arg->offset != NULL) { error = copyin(arg->offset, &offset, sizeof(offset)); if (error != 0) return (error); } error = linux_sendfile_common(td, arg->out, arg->in, arg->offset != NULL ? 
&offset : NULL, arg->count); if (error == 0 && arg->offset != NULL) error = copyout(&offset, arg->offset, sizeof(offset)); return (error); } /* Argument list sizes for linux_socketcall */ static const unsigned char lxs_args_cnt[] = { 0 /* unused*/, 3 /* socket */, 3 /* bind */, 3 /* connect */, 2 /* listen */, 3 /* accept */, 3 /* getsockname */, 3 /* getpeername */, 4 /* socketpair */, 4 /* send */, 4 /* recv */, 6 /* sendto */, 6 /* recvfrom */, 2 /* shutdown */, 5 /* setsockopt */, 5 /* getsockopt */, 3 /* sendmsg */, 3 /* recvmsg */, 4 /* accept4 */, 5 /* recvmmsg */, 4 /* sendmmsg */, 4 /* sendfile */ }; #define LINUX_ARGS_CNT (nitems(lxs_args_cnt) - 1) #define LINUX_ARG_SIZE(x) (lxs_args_cnt[x] * sizeof(l_ulong)) int linux_socketcall(struct thread *td, struct linux_socketcall_args *args) { l_ulong a[6]; #if defined(__amd64__) && defined(COMPAT_LINUX32) register_t l_args[6]; #endif void *arg; int error; if (args->what < LINUX_SOCKET || args->what > LINUX_ARGS_CNT) return (EINVAL); error = copyin(PTRIN(args->args), a, LINUX_ARG_SIZE(args->what)); if (error != 0) return (error); #if defined(__amd64__) && defined(COMPAT_LINUX32) for (int i = 0; i < lxs_args_cnt[args->what]; ++i) l_args[i] = a[i]; arg = l_args; #else arg = a; #endif switch (args->what) { case LINUX_SOCKET: return (linux_socket(td, arg)); case LINUX_BIND: return (linux_bind(td, arg)); case LINUX_CONNECT: return (linux_connect(td, arg)); case LINUX_LISTEN: return (linux_listen(td, arg)); case LINUX_ACCEPT: return (linux_accept(td, arg)); case LINUX_GETSOCKNAME: return (linux_getsockname(td, arg)); case LINUX_GETPEERNAME: return (linux_getpeername(td, arg)); case LINUX_SOCKETPAIR: return (linux_socketpair(td, arg)); case LINUX_SEND: return (linux_send(td, arg)); case LINUX_RECV: return (linux_recv(td, arg)); case LINUX_SENDTO: return (linux_sendto(td, arg)); case LINUX_RECVFROM: return (linux_recvfrom(td, arg)); case LINUX_SHUTDOWN: return (linux_shutdown(td, arg)); case LINUX_SETSOCKOPT: return (linux_setsockopt(td, arg)); case LINUX_GETSOCKOPT: return (linux_getsockopt(td, arg)); case LINUX_SENDMSG: return (linux_sendmsg(td, arg)); case LINUX_RECVMSG: return (linux_recvmsg(td, arg)); case LINUX_ACCEPT4: return (linux_accept4(td, arg)); case LINUX_RECVMMSG: return (linux_recvmmsg(td, arg)); case LINUX_SENDMMSG: return (linux_sendmmsg(td, arg)); case LINUX_SENDFILE: return (linux_sendfile(td, arg)); } linux_msg(td, "socket type %d not implemented", args->what); return (ENOSYS); } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ diff --git a/sys/compat/linuxkpi/common/include/linux/net.h b/sys/compat/linuxkpi/common/include/linux/net.h index f750de8c5cf3..dc0b46df9c99 100644 --- a/sys/compat/linuxkpi/common/include/linux/net.h +++ b/sys/compat/linuxkpi/common/include/linux/net.h @@ -1,87 +1,88 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUXKPI_LINUX_NET_H_ #define _LINUXKPI_LINUX_NET_H_ #include #include #include #include #include #include #include static inline int sock_create_kern(int family, int type, int proto, struct socket **res) { return -socreate(family, res, type, proto, curthread->td_ucred, curthread); } static inline int -sock_getname(struct socket *so, struct sockaddr *addr, int *sockaddr_len, +sock_getname(struct socket *so, struct sockaddr *sa, int *sockaddr_len, int peer) { - struct sockaddr *nam; int error; - nam = NULL; + /* + * XXXGL: we can't use sopeeraddr()/sosockaddr() here since with + * INVARIANTS they would check if supplied sockaddr has enough + * length. Such notion doesn't even exist in Linux KPI. + */ if (peer) { if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) return (-ENOTCONN); - error = so->so_proto->pr_peeraddr(so, &nam); + error = so->so_proto->pr_peeraddr(so, sa); } else - error = so->so_proto->pr_sockaddr(so, &nam); + error = so->so_proto->pr_sockaddr(so, sa); if (error) return (-error); - *addr = *nam; - *sockaddr_len = addr->sa_len; + *sockaddr_len = sa->sa_len; - free(nam, M_SONAME); return (0); } static inline void sock_release(struct socket *so) { soclose(so); } int linuxkpi_net_ratelimit(void); static inline int net_ratelimit(void) { return (linuxkpi_net_ratelimit()); } #endif /* _LINUXKPI_LINUX_NET_H_ */ diff --git a/sys/dev/cxgbe/iw_cxgbe/cm.c b/sys/dev/cxgbe/iw_cxgbe/cm.c index 84d6df3f2832..e0e48fdff9ba 100644 --- a/sys/dev/cxgbe/iw_cxgbe/cm.c +++ b/sys/dev/cxgbe/iw_cxgbe/cm.c @@ -1,3065 +1,3059 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include #include "opt_inet.h" #ifdef TCP_OFFLOAD #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct sge_iq; struct rss_header; struct cpl_set_tcb_rpl; #include #include "offload.h" #include "tom/t4_tom.h" #define TOEPCB(so) ((struct toepcb *)(sototcpcb((so))->t_toe)) #include "iw_cxgbe.h" #include #include #include #include #include static spinlock_t req_lock; static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list; static struct work_struct c4iw_task; static struct workqueue_struct *c4iw_taskq; static LIST_HEAD(err_cqe_list); static spinlock_t err_cqe_lock; static LIST_HEAD(listen_port_list); static DEFINE_MUTEX(listen_port_mutex); static void process_req(struct work_struct *ctx); static void start_ep_timer(struct c4iw_ep *ep); static int stop_ep_timer(struct c4iw_ep *ep); static int set_tcpinfo(struct c4iw_ep *ep); static void process_timeout(struct c4iw_ep *ep); static void process_err_cqes(void); static void *alloc_ep(int size, gfp_t flags); static void close_socket(struct socket *so); static int send_mpa_req(struct c4iw_ep *ep); static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen); static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen); static void close_complete_upcall(struct c4iw_ep *ep, int status); static int send_abort(struct c4iw_ep *ep); static void peer_close_upcall(struct c4iw_ep *ep); static void peer_abort_upcall(struct c4iw_ep *ep); static void connect_reply_upcall(struct c4iw_ep *ep, int status); static int connect_request_upcall(struct c4iw_ep *ep); static void established_upcall(struct c4iw_ep *ep); static int process_mpa_reply(struct c4iw_ep *ep); static int process_mpa_request(struct c4iw_ep *ep); static void process_peer_close(struct c4iw_ep *ep); static void process_conn_error(struct c4iw_ep *ep); static void process_close_complete(struct c4iw_ep *ep); static void ep_timeout(unsigned long arg); static void setiwsockopt(struct socket *so); static void init_iwarp_socket(struct socket *so, void *arg); static void uninit_iwarp_socket(struct socket *so); static void process_data(struct c4iw_ep *ep); static void process_connected(struct c4iw_ep *ep); static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag); static void process_socket_event(struct c4iw_ep *ep); static void release_ep_resources(struct c4iw_ep *ep); static int process_terminate(struct c4iw_ep *ep); static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m); static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events); static struct listen_port_info * add_ep_to_listenlist(struct c4iw_listen_ep *lep); static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep); static struct c4iw_listen_ep * find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so); static int get_ifnet_from_raddr(struct sockaddr_storage *raddr, if_t *ifp); static void process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so); #define START_EP_TIMER(ep) \ do { \ CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \ __func__, __LINE__, (ep)); \ start_ep_timer(ep); \ } while (0) #define STOP_EP_TIMER(ep) \ ({ \ 
CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \ __func__, __LINE__, (ep)); \ stop_ep_timer(ep); \ }) #define GET_LOCAL_ADDR(pladdr, so) \ do { \ - struct sockaddr_storage *__a = NULL; \ struct inpcb *__inp = sotoinpcb(so); \ KASSERT(__inp != NULL, \ ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \ if (__inp->inp_vflag & INP_IPV4) \ - in_getsockaddr(so, (struct sockaddr **)&__a); \ + in_getsockaddr(so, (struct sockaddr *)pladdr); \ else \ - in6_getsockaddr(so, (struct sockaddr **)&__a); \ - *(pladdr) = *__a; \ - free(__a, M_SONAME); \ + in6_getsockaddr(so, (struct sockaddr *)pladdr); \ } while (0) #define GET_REMOTE_ADDR(praddr, so) \ do { \ - struct sockaddr_storage *__a = NULL; \ struct inpcb *__inp = sotoinpcb(so); \ KASSERT(__inp != NULL, \ ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \ if (__inp->inp_vflag & INP_IPV4) \ - in_getpeeraddr(so, (struct sockaddr **)&__a); \ + in_getpeeraddr(so, (struct sockaddr *)praddr); \ else \ - in6_getpeeraddr(so, (struct sockaddr **)&__a); \ - *(praddr) = *__a; \ - free(__a, M_SONAME); \ + in6_getpeeraddr(so, (struct sockaddr *)praddr); \ } while (0) static char *states[] = { "idle", "listen", "connecting", "mpa_wait_req", "mpa_req_sent", "mpa_req_rcvd", "mpa_rep_sent", "fpdu_mode", "aborting", "closing", "moribund", "dead", NULL, }; static void deref_cm_id(struct c4iw_ep_common *epc) { epc->cm_id->rem_ref(epc->cm_id); epc->cm_id = NULL; set_bit(CM_ID_DEREFED, &epc->history); } static void ref_cm_id(struct c4iw_ep_common *epc) { set_bit(CM_ID_REFED, &epc->history); epc->cm_id->add_ref(epc->cm_id); } static void deref_qp(struct c4iw_ep *ep) { c4iw_qp_rem_ref(&ep->com.qp->ibqp); clear_bit(QP_REFERENCED, &ep->com.flags); set_bit(QP_DEREFED, &ep->com.history); } static void ref_qp(struct c4iw_ep *ep) { set_bit(QP_REFERENCED, &ep->com.flags); set_bit(QP_REFED, &ep->com.history); c4iw_qp_add_ref(&ep->com.qp->ibqp); } /* allocated per TCP port while listening */ struct listen_port_info { uint16_t port_num; /* TCP port address */ struct list_head list; /* belongs to listen_port_list */ struct list_head lep_list; /* per port lep list */ uint32_t refcnt; /* number of lep's listening */ }; /* * Following two lists are used to manage INADDR_ANY listeners: * 1)listen_port_list * 2)lep_list * * Below is the INADDR_ANY listener lists overview on a system with a two port * adapter: * |------------------| * |listen_port_list | * |------------------| * | * | |-----------| |-----------| * | | port_num:X| | port_num:X| * |--------------|-list------|-------|-list------|-------.... * | lep_list----| | lep_list----| * | refcnt | | | refcnt | | * | | | | | | * | | | | | | * |-----------| | |-----------| | * | | * | | * | | * | | lep1 lep2 * | | |----------------| |----------------| * | |----| listen_ep_list |----| listen_ep_list | * | |----------------| |----------------| * | * | * | lep1 lep2 * | |----------------| |----------------| * |---| listen_ep_list |----| listen_ep_list | * |----------------| |----------------| * * Because of two port adapter, the number of lep's are two(lep1 & lep2) for * each TCP port number. * * Here 'lep1' is always marked as Master lep, because solisten() is always * called through first lep. * */ static struct listen_port_info * add_ep_to_listenlist(struct c4iw_listen_ep *lep) { uint16_t port; struct listen_port_info *port_info = NULL; struct sockaddr_storage *laddr = &lep->com.local_addr; port = (laddr->ss_family == AF_INET) ? 
((struct sockaddr_in *)laddr)->sin_port : ((struct sockaddr_in6 *)laddr)->sin6_port; mutex_lock(&listen_port_mutex); list_for_each_entry(port_info, &listen_port_list, list) if (port_info->port_num == port) goto found_port; port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK); port_info->port_num = port; port_info->refcnt = 0; list_add_tail(&port_info->list, &listen_port_list); INIT_LIST_HEAD(&port_info->lep_list); found_port: port_info->refcnt++; list_add_tail(&lep->listen_ep_list, &port_info->lep_list); mutex_unlock(&listen_port_mutex); return port_info; } static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep) { uint16_t port; struct listen_port_info *port_info = NULL; struct sockaddr_storage *laddr = &lep->com.local_addr; int refcnt = 0; port = (laddr->ss_family == AF_INET) ? ((struct sockaddr_in *)laddr)->sin_port : ((struct sockaddr_in6 *)laddr)->sin6_port; mutex_lock(&listen_port_mutex); /* get the port_info structure based on the lep's port address */ list_for_each_entry(port_info, &listen_port_list, list) { if (port_info->port_num == port) { port_info->refcnt--; refcnt = port_info->refcnt; /* remove the current lep from the listen list */ list_del(&lep->listen_ep_list); if (port_info->refcnt == 0) { /* Remove this entry from the list as there * are no more listeners for this port_num. */ list_del(&port_info->list); kfree(port_info); } break; } } mutex_unlock(&listen_port_mutex); return refcnt; } /* * Find the lep that belongs to the ifnet on which the SYN frame was received. */ struct c4iw_listen_ep * find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so) { struct adapter *adap = NULL; struct c4iw_listen_ep *lep = NULL; if_t ifp = NULL, hw_ifp = NULL; struct listen_port_info *port_info = NULL; int i = 0, found_portinfo = 0, found_lep = 0; uint16_t port; /* * STEP 1: Figure out 'ifp' of the physical interface, not pseudo * interfaces like vlan, lagg, etc.. * TBD: lagg support, lagg + vlan support. */ ifp = TOEPCB(so)->l2te->ifp; if (if_gettype(ifp) == IFT_L2VLAN) { hw_ifp = VLAN_TRUNKDEV(ifp); if (hw_ifp == NULL) { CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of " "vlan ifnet %p, sock %p, master_lep %p", __func__, ifp, so, master_lep); return (NULL); } } else hw_ifp = ifp; /* STEP 2: Find 'port_info' with listener local port address. */ port = (master_lep->com.local_addr.ss_family == AF_INET) ? ((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port : ((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port; mutex_lock(&listen_port_mutex); list_for_each_entry(port_info, &listen_port_list, list) if (port_info->port_num == port) { found_portinfo =1; break; } if (!found_portinfo) goto out; /* STEP 3: Traverse through list of lep's that are bound to the current * TCP port address and find the lep that belongs to the ifnet on which * the SYN frame was received. */ list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) { adap = lep->com.dev->rdev.adap; for_each_port(adap, i) { if (hw_ifp == adap->port[i]->vi[0].ifp) { found_lep =1; goto out; } } } out: mutex_unlock(&listen_port_mutex); return found_lep ? 
lep : (NULL); } static void process_timeout(struct c4iw_ep *ep) { struct c4iw_qp_attributes attrs = {0}; int abort = 1; CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__, ep, ep->hwtid, ep->com.state); set_bit(TIMEDOUT, &ep->com.history); switch (ep->com.state) { case MPA_REQ_SENT: connect_reply_upcall(ep, -ETIMEDOUT); break; case MPA_REQ_WAIT: case MPA_REQ_RCVD: case MPA_REP_SENT: case FPDU_MODE: break; case CLOSING: case MORIBUND: if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_ERROR; c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_complete_upcall(ep, -ETIMEDOUT); break; case ABORTING: case DEAD: /* * These states are expected if the ep timed out at the same * time as another thread was calling stop_ep_timer(). * So we silently do nothing for these states. */ abort = 0; break; default: CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u" , __func__, ep, ep->hwtid, ep->com.state); abort = 0; } if (abort) c4iw_ep_disconnect(ep, 1, GFP_KERNEL); c4iw_put_ep(&ep->com); return; } struct cqe_list_entry { struct list_head entry; struct c4iw_dev *rhp; struct t4_cqe err_cqe; }; static void process_err_cqes(void) { unsigned long flag; struct cqe_list_entry *cle; spin_lock_irqsave(&err_cqe_lock, flag); while (!list_empty(&err_cqe_list)) { struct list_head *tmp; tmp = err_cqe_list.next; list_del(tmp); tmp->next = tmp->prev = NULL; spin_unlock_irqrestore(&err_cqe_lock, flag); cle = list_entry(tmp, struct cqe_list_entry, entry); c4iw_ev_dispatch(cle->rhp, &cle->err_cqe); free(cle, M_CXGBE); spin_lock_irqsave(&err_cqe_lock, flag); } spin_unlock_irqrestore(&err_cqe_lock, flag); return; } static void process_req(struct work_struct *ctx) { struct c4iw_ep_common *epc; unsigned long flag; int ep_events; process_err_cqes(); spin_lock_irqsave(&req_lock, flag); while (!TAILQ_EMPTY(&req_list)) { epc = TAILQ_FIRST(&req_list); TAILQ_REMOVE(&req_list, epc, entry); epc->entry.tqe_prev = NULL; ep_events = epc->ep_events; epc->ep_events = 0; spin_unlock_irqrestore(&req_lock, flag); mutex_lock(&epc->mutex); CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x", __func__, epc->so, epc, states[epc->state], ep_events); if (ep_events & C4IW_EVENT_TERM) process_terminate((struct c4iw_ep *)epc); if (ep_events & C4IW_EVENT_TIMEOUT) process_timeout((struct c4iw_ep *)epc); if (ep_events & C4IW_EVENT_SOCKET) process_socket_event((struct c4iw_ep *)epc); mutex_unlock(&epc->mutex); c4iw_put_ep(epc); process_err_cqes(); spin_lock_irqsave(&req_lock, flag); } spin_unlock_irqrestore(&req_lock, flag); } /* * XXX: doesn't belong here in the iWARP driver. * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is * set. Is this a valid assumption for active open? 
*/ static int set_tcpinfo(struct c4iw_ep *ep) { struct socket *so = ep->com.so; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp; struct toepcb *toep; int rc = 0; INP_WLOCK(inp); tp = intotcpcb(inp); if ((tp->t_flags & TF_TOE) == 0) { rc = EINVAL; log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n", __func__, so, ep); goto done; } toep = TOEPCB(so); ep->hwtid = toep->tid; ep->snd_seq = tp->snd_nxt; ep->rcv_seq = tp->rcv_nxt; done: INP_WUNLOCK(inp); return (rc); } static int get_ifnet_from_raddr(struct sockaddr_storage *raddr, if_t *ifp) { int err = 0; struct nhop_object *nh; if (raddr->ss_family == AF_INET) { struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr; nh = fib4_lookup(RT_DEFAULT_FIB, raddr4->sin_addr, 0, NHR_NONE, 0); } else { struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr; struct in6_addr addr6; uint32_t scopeid; memset(&addr6, 0, sizeof(addr6)); in6_splitscope((struct in6_addr *)&raddr6->sin6_addr, &addr6, &scopeid); nh = fib6_lookup(RT_DEFAULT_FIB, &addr6, scopeid, NHR_NONE, 0); } if (nh == NULL) err = EHOSTUNREACH; else *ifp = nh->nh_ifp; CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err); return err; } static void close_socket(struct socket *so) { uninit_iwarp_socket(so); soclose(so); } static void process_peer_close(struct c4iw_ep *ep) { struct c4iw_qp_attributes attrs = {0}; int disconnect = 1; int release = 0; CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep, ep->com.so, states[ep->com.state]); switch (ep->com.state) { case MPA_REQ_WAIT: CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD", __func__, ep); /* Fallthrough */ case MPA_REQ_SENT: CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD", __func__, ep); ep->com.state = DEAD; connect_reply_upcall(ep, -ECONNABORTED); disconnect = 0; STOP_EP_TIMER(ep); close_socket(ep->com.so); deref_cm_id(&ep->com); release = 1; break; case MPA_REQ_RCVD: /* * We're gonna mark this puppy DEAD, but keep * the reference on it until the ULP accepts or * rejects the CR. 
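process_peer_close(), whose body continues below, reacts to the peer's FIN reaching the offloaded socket. On the plain sockets API the same event is what a reader observes as a zero-length read; a userspace sketch for contrast (illustrative only, not part of this change):

/* Illustrative only: how peer close surfaces to an ordinary sockets consumer. */
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

static void
drain_until_peer_close(int fd)
{
    char buf[4096];
    ssize_t n;

    while ((n = read(fd, buf, sizeof(buf))) > 0)
        ;                       /* consume streaming data */
    if (n == 0)
        printf("peer sent FIN (orderly close)\n");
    else
        perror("read");         /* reset or other error */
    close(fd);
}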
*/ CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING", __func__, ep); ep->com.state = CLOSING; break; case MPA_REP_SENT: CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING", __func__, ep); ep->com.state = CLOSING; break; case FPDU_MODE: CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING", __func__, ep); START_EP_TIMER(ep); ep->com.state = CLOSING; attrs.next_state = C4IW_QP_STATE_CLOSING; c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); peer_close_upcall(ep); break; case ABORTING: CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)", __func__, ep); disconnect = 0; break; case CLOSING: CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND", __func__, ep); ep->com.state = MORIBUND; disconnect = 0; break; case MORIBUND: CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__, ep); STOP_EP_TIMER(ep); if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_socket(ep->com.so); close_complete_upcall(ep, 0); ep->com.state = DEAD; release = 1; disconnect = 0; break; case DEAD: CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)", __func__, ep); disconnect = 0; break; default: panic("%s: ep %p state %d", __func__, ep, ep->com.state); break; } if (disconnect) { CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep); c4iw_ep_disconnect(ep, 0, M_NOWAIT); } if (release) { CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep); c4iw_put_ep(&ep->com); } CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep); return; } static void process_conn_error(struct c4iw_ep *ep) { struct c4iw_qp_attributes attrs = {0}; int ret; int state; state = ep->com.state; CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s", __func__, ep, ep->com.so, ep->com.so->so_error, states[ep->com.state]); switch (state) { case MPA_REQ_WAIT: STOP_EP_TIMER(ep); c4iw_put_ep(&ep->parent_ep->com); break; case MPA_REQ_SENT: STOP_EP_TIMER(ep); connect_reply_upcall(ep, -ECONNRESET); break; case MPA_REP_SENT: ep->com.rpl_err = ECONNRESET; CTR1(KTR_IW_CXGBE, "waking up ep %p", ep); break; case MPA_REQ_RCVD: break; case MORIBUND: case CLOSING: STOP_EP_TIMER(ep); /*FALLTHROUGH*/ case FPDU_MODE: if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_ERROR; ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret) log(LOG_ERR, "%s - qp <- error failed!\n", __func__); } peer_abort_upcall(ep); break; case ABORTING: break; case DEAD: CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!", __func__, ep->com.so->so_error); return; default: panic("%s: ep %p state %d", __func__, ep, state); break; } if (state != ABORTING) { close_socket(ep->com.so); ep->com.state = DEAD; c4iw_put_ep(&ep->com); } CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep); return; } static void process_close_complete(struct c4iw_ep *ep) { struct c4iw_qp_attributes attrs = {0}; int release = 0; CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep, ep->com.so, states[ep->com.state]); /* The cm_id may be null if we failed to connect */ set_bit(CLOSE_CON_RPL, &ep->com.history); switch (ep->com.state) { case CLOSING: CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND", __func__, ep); ep->com.state = MORIBUND; break; case MORIBUND: CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__, ep); STOP_EP_TIMER(ep); if ((ep->com.cm_id) && (ep->com.qp)) { CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE", __func__, ep); attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } 
close_socket(ep->com.so); close_complete_upcall(ep, 0); ep->com.state = DEAD; release = 1; break; case ABORTING: CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep); break; case DEAD: CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep); break; default: CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state", __func__, ep); panic("%s:pcc6 %p unknown ep state", __func__, ep); break; } if (release) { CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep); release_ep_resources(ep); } CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep); return; } static void setiwsockopt(struct socket *so) { int rc; struct sockopt sopt; int on = 1; sopt.sopt_dir = SOPT_SET; sopt.sopt_level = IPPROTO_TCP; sopt.sopt_name = TCP_NODELAY; sopt.sopt_val = (caddr_t)&on; sopt.sopt_valsize = sizeof on; sopt.sopt_td = NULL; rc = -sosetopt(so, &sopt); if (rc) { log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n", __func__, so, rc); } } static void init_iwarp_socket(struct socket *so, void *arg) { if (SOLISTENING(so)) { SOLISTEN_LOCK(so); solisten_upcall_set(so, c4iw_so_upcall, arg); so->so_state |= SS_NBIO; SOLISTEN_UNLOCK(so); } else { SOCKBUF_LOCK(&so->so_rcv); soupcall_set(so, SO_RCV, c4iw_so_upcall, arg); so->so_state |= SS_NBIO; SOCKBUF_UNLOCK(&so->so_rcv); } } static void uninit_iwarp_socket(struct socket *so) { if (SOLISTENING(so)) { SOLISTEN_LOCK(so); solisten_upcall_set(so, NULL, NULL); SOLISTEN_UNLOCK(so); } else { SOCKBUF_LOCK(&so->so_rcv); soupcall_clear(so, SO_RCV); SOCKBUF_UNLOCK(&so->so_rcv); } } static void process_data(struct c4iw_ep *ep) { int ret = 0; int disconnect = 0; struct c4iw_qp_attributes attrs = {0}; CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__, ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv)); switch (ep->com.state) { case MPA_REQ_SENT: disconnect = process_mpa_reply(ep); break; case MPA_REQ_WAIT: disconnect = process_mpa_request(ep); if (disconnect) /* Refered in process_newconn() */ c4iw_put_ep(&ep->parent_ep->com); break; case FPDU_MODE: MPASS(ep->com.qp != NULL); attrs.next_state = C4IW_QP_STATE_TERMINATE; ret = c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret != -EINPROGRESS) disconnect = 1; break; default: log(LOG_ERR, "%s: Unexpected streaming data. 
ep %p, " "state %d, so %p, so_state 0x%x, sbused %u\n", __func__, ep, ep->com.state, ep->com.so, ep->com.so->so_state, sbused(&ep->com.so->so_rcv)); break; } if (disconnect) c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); } static void process_connected(struct c4iw_ep *ep) { struct socket *so = ep->com.so; if ((so->so_state & SS_ISCONNECTED) && !so->so_error) { if (send_mpa_req(ep)) goto err; } else { connect_reply_upcall(ep, -so->so_error); goto err; } return; err: close_socket(so); ep->com.state = DEAD; c4iw_put_ep(&ep->com); return; } static inline bool c4iw_zero_addr(struct sockaddr *addr) { struct in6_addr *ip6; if (addr->sa_family == AF_INET) return (((struct sockaddr_in *)addr)->sin_addr.s_addr == 0); else { ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr; return (ip6->s6_addr32[0] | ip6->s6_addr32[1] | ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0; } } #define _IN_LOOPBACK(i) (((in_addr_t)(i) & 0xff000000) == 0x7f000000) static inline bool c4iw_loopback_addr(struct sockaddr *addr, struct vnet *vnet) { bool ret; if (addr->sa_family == AF_INET) { if (vnet == NULL) ret = _IN_LOOPBACK(ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr)); else { CURVNET_SET_QUIET(vnet); ret = IN_LOOPBACK(ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr)); CURVNET_RESTORE(); } } else { ret = IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *) addr)->sin6_addr); } return (ret); } #undef _IN_LOOPBACK static inline bool c4iw_any_addr(struct sockaddr *addr, struct vnet *vnet) { return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr, vnet); } static void process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so) { struct c4iw_listen_ep *real_lep = NULL; struct c4iw_ep *new_ep = NULL; struct sockaddr_storage remote = { .ss_len = sizeof(remote) }; int ret = 0; MPASS(new_so != NULL); if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr, new_so->so_vnet)) { /* Here we need to find the 'real_lep' that belongs to the * incomming socket's network interface, such that the newly * created 'ep' can be attached to the real 'lep'. */ real_lep = find_real_listen_ep(master_lep, new_so); if (real_lep == NULL) { CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen " "ep for sock: %p", __func__, new_so); log(LOG_ERR,"%s: Could not find the real listen ep for " "sock: %p\n", __func__, new_so); /* FIXME: properly free the 'new_so' in failure case. * Use of soabort() and soclose() are not legal * here(before soaccept()). 
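The wildcard/loopback classification above (c4iw_zero_addr(), c4iw_loopback_addr(), c4iw_any_addr()) is what forces process_newconn() to look up the per-interface listen endpoint via find_real_listen_ep(). A simplified userspace-style sketch of the same classification, using only standard macros (illustrative only, not the driver's code):

/* Simplified sketch of the c4iw_any_addr() check above (illustrative only). */
#include <stdbool.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static bool
addr_is_any_or_loopback(const struct sockaddr *sa)
{
    if (sa->sa_family == AF_INET) {
        in_addr_t a =
            ntohl(((const struct sockaddr_in *)(const void *)sa)->sin_addr.s_addr);
        return (a == INADDR_ANY || (a >> 24) == 127);   /* 127/8 is loopback */
    }
    if (sa->sa_family == AF_INET6) {
        const struct in6_addr *a6 =
            &((const struct sockaddr_in6 *)(const void *)sa)->sin6_addr;
        return (IN6_IS_ADDR_UNSPECIFIED(a6) || IN6_IS_ADDR_LOOPBACK(a6));
    }
    return (false);
}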
*/ return; } } else /* for Non-Wildcard address, master_lep is always the real_lep */ real_lep = master_lep; new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL); CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, " "listening so %p, new so %p", __func__, master_lep, real_lep, new_ep, master_lep->com.so, new_so); new_ep->com.dev = real_lep->com.dev; new_ep->com.so = new_so; new_ep->com.cm_id = NULL; new_ep->com.thread = real_lep->com.thread; new_ep->parent_ep = real_lep; GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so); GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so); c4iw_get_ep(&real_lep->com); init_timer(&new_ep->timer); new_ep->com.state = MPA_REQ_WAIT; setiwsockopt(new_so); ret = soaccept(new_so, (struct sockaddr *)&remote); if (ret != 0) { CTR4(KTR_IW_CXGBE, "%s:listen sock:%p, new sock:%p, ret:%d", __func__, master_lep->com.so, new_so, ret); soclose(new_so); c4iw_put_ep(&new_ep->com); c4iw_put_ep(&real_lep->com); return; } START_EP_TIMER(new_ep); /* MPA request might have been queued up on the socket already, so we * initialize the socket/upcall_handler under lock to prevent processing * MPA request on another thread(via process_req()) simultaneously. */ c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below, this is to avoid freeing of ep before ep unlock. */ mutex_lock(&new_ep->com.mutex); init_iwarp_socket(new_so, &new_ep->com); ret = process_mpa_request(new_ep); if (ret) { /* ABORT */ c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL); c4iw_put_ep(&real_lep->com); } mutex_unlock(&new_ep->com.mutex); c4iw_put_ep(&new_ep->com); return; } static int add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event) { unsigned long flag; spin_lock_irqsave(&req_lock, flag); if (ep && ep->com.so) { ep->com.ep_events |= new_ep_event; if (!ep->com.entry.tqe_prev) { c4iw_get_ep(&ep->com); TAILQ_INSERT_TAIL(&req_list, &ep->com, entry); queue_work(c4iw_taskq, &c4iw_task); } } spin_unlock_irqrestore(&req_lock, flag); return (0); } static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag) { struct c4iw_ep *ep = arg; CTR6(KTR_IW_CXGBE, "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p", __func__, so, so->so_state, ep, states[ep->com.state], ep->com.entry.tqe_prev); MPASS(ep->com.so == so); /* * Wake up any threads waiting in rdma_init()/rdma_fini(), * with locks held. 
*/ if (so->so_error || (ep->com.dev->rdev.flags & T4_FATAL_ERROR)) c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); add_ep_to_req_list(ep, C4IW_EVENT_SOCKET); return (SU_OK); } static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_rdma_terminate *cpl = mtod(m, const void *); unsigned int tid = GET_TID(cpl); struct toepcb *toep = lookup_tid(sc, tid); struct socket *so; struct c4iw_ep *ep; INP_WLOCK(toep->inp); so = inp_inpcbtosocket(toep->inp); ep = so->so_rcv.sb_upcallarg; INP_WUNLOCK(toep->inp); CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep); add_ep_to_req_list(ep, C4IW_EVENT_TERM); return 0; } static void process_socket_event(struct c4iw_ep *ep) { int state = ep->com.state; struct socket *so = ep->com.so; if (ep->com.state == DEAD) { CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded " "ep %p ep_state %s", __func__, ep, states[state]); return; } CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, " "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state, so->so_error, so->so_rcv.sb_state, ep, states[state]); if (state == CONNECTING) { process_connected(ep); return; } if (state == LISTEN) { struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep; struct socket *listen_so = so, *new_so = NULL; int error = 0; SOLISTEN_LOCK(listen_so); do { error = solisten_dequeue(listen_so, &new_so, SOCK_NONBLOCK); if (error) { CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p " "error %d", __func__, lep, listen_so, error); return; } process_newconn(lep, new_so); /* solisten_dequeue() unlocks while return, so aquire * lock again for sol_qlen and also for next iteration. */ SOLISTEN_LOCK(listen_so); } while (listen_so->sol_qlen); SOLISTEN_UNLOCK(listen_so); return; } /* connection error */ if (so->so_error) { process_conn_error(ep); return; } /* peer close */ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) { process_peer_close(ep); /* * check whether socket disconnect event is pending before * returning. Fallthrough if yes. */ if (!(so->so_state & SS_ISDISCONNECTED)) return; } /* close complete */ if (so->so_state & SS_ISDISCONNECTED) { process_close_complete(ep); return; } /* rx data */ if (sbused(&ep->com.so->so_rcv)) { process_data(ep); return; } /* Socket events for 'MPA Request Received' and 'Close Complete' * were already processed earlier in their previous events handlers. * Hence, these socket events are skipped. * And any other socket events must have handled above. 
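The LISTEN branch of process_socket_event() above loops on solisten_dequeue() until sol_qlen drains, because a single upcall may announce several queued connections. The userspace analogue is draining accept4() until EAGAIN after a readiness event (illustrative only, not part of this change):

/* Illustrative userspace analogue of the solisten_dequeue() drain loop above. */
#define _GNU_SOURCE             /* for accept4() on glibc */
#include <sys/socket.h>
#include <errno.h>
#include <unistd.h>

static void
drain_accept_queue(int lfd, void (*on_conn)(int))
{
    for (;;) {
        int cfd = accept4(lfd, NULL, NULL, SOCK_NONBLOCK);
        if (cfd == -1) {
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                break;          /* queue drained */
            if (errno == ECONNABORTED)
                continue;       /* peer gave up; keep draining */
            break;              /* real error */
        }
        on_conn(cfd);
    }
}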
*/ MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND)); if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND)) log(LOG_ERR, "%s: Unprocessed socket event so %p, " "so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n", __func__, so, so->so_state, so->so_error, so->so_rcv.sb_state, ep, states[state]); } SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "iw_cxgbe driver parameters"); static int dack_mode = 0; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0, "Delayed ack mode (default = 0)"); int c4iw_max_read_depth = 8; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0, "Per-connection max ORD/IRD (default = 8)"); static int enable_tcp_timestamps; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0, "Enable tcp timestamps (default = 0)"); static int enable_tcp_sack; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0, "Enable tcp SACK (default = 0)"); static int enable_tcp_window_scaling = 1; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0, "Enable tcp window scaling (default = 1)"); int c4iw_debug = 0; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0, "Enable debug logging (default = 0)"); static int peer2peer = 1; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0, "Support peer2peer ULPs (default = 1)"); static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0, "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)"); static int ep_timeout_secs = 60; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0, "CM Endpoint operation timeout in seconds (default = 60)"); static int mpa_rev = 1; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0, "MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)"); static int markers_enabled; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0, "Enable MPA MARKERS (default(0) = disabled)"); static int crc_enabled = 1; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0, "Enable MPA CRC (default(1) = enabled)"); static int rcv_win = 256 * 1024; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0, "TCP receive window in bytes (default = 256KB)"); static int snd_win = 128 * 1024; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0, "TCP send window in bytes (default = 128KB)"); int use_dsgl = 1; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0, "Use DSGL for PBL/FastReg (default=1)"); int inline_threshold = 128; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0, "inline vs dsgl threshold (default=128)"); static int reuseaddr = 0; SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0, "Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections(default = 0)"); static void start_ep_timer(struct c4iw_ep *ep) { if (timer_pending(&ep->timer)) { CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep); printk(KERN_ERR "%s timer already started! 
ep %p\n", __func__, ep); return; } clear_bit(TIMEOUT, &ep->com.flags); c4iw_get_ep(&ep->com); ep->timer.expires = jiffies + ep_timeout_secs * HZ; ep->timer.data = (unsigned long)ep; ep->timer.function = ep_timeout; add_timer(&ep->timer); } static int stop_ep_timer(struct c4iw_ep *ep) { del_timer_sync(&ep->timer); if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { c4iw_put_ep(&ep->com); return 0; } return 1; } static void * alloc_ep(int size, gfp_t gfp) { struct c4iw_ep_common *epc; epc = kzalloc(size, gfp); if (epc == NULL) return (NULL); kref_init(&epc->kref); mutex_init(&epc->mutex); c4iw_init_wr_wait(&epc->wr_wait); return (epc); } void _c4iw_free_ep(struct kref *kref) { struct c4iw_ep *ep; #if defined(KTR) || defined(INVARIANTS) struct c4iw_ep_common *epc; #endif ep = container_of(kref, struct c4iw_ep, com.kref); #if defined(KTR) || defined(INVARIANTS) epc = &ep->com; #endif KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list", __func__, epc)); if (test_bit(QP_REFERENCED, &ep->com.flags)) deref_qp(ep); CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx", __func__, ep, epc->history, epc->flags); kfree(ep); } static void release_ep_resources(struct c4iw_ep *ep) { CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep); set_bit(RELEASE_RESOURCES, &ep->com.flags); c4iw_put_ep(&ep->com); CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep); } static int send_mpa_req(struct c4iw_ep *ep) { int mpalen; struct mpa_message *mpa; struct mpa_v2_conn_params mpa_v2_params; struct mbuf *m; char mpa_rev_to_use = mpa_rev; int err = 0; if (ep->retry_with_mpa_v1) mpa_rev_to_use = 1; mpalen = sizeof(*mpa) + ep->plen; if (mpa_rev_to_use == 2) mpalen += sizeof(struct mpa_v2_conn_params); mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); if (mpa == NULL) { err = -ENOMEM; CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d", __func__, ep, err); goto err; } memset(mpa, 0, mpalen); memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); mpa->flags = (crc_enabled ? MPA_CRC : 0) | (markers_enabled ? MPA_MARKERS : 0) | (mpa_rev_to_use == 2 ? 
MPA_ENHANCED_RDMA_CONN : 0); mpa->private_data_size = htons(ep->plen); mpa->revision = mpa_rev_to_use; if (mpa_rev_to_use == 1) { ep->tried_with_mpa_v1 = 1; ep->retry_with_mpa_v1 = 0; } if (mpa_rev_to_use == 2) { mpa->private_data_size = htons(ntohs(mpa->private_data_size) + sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer) { mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); } else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); } } memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) { memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), ep->mpa_pkt + sizeof(*mpa), ep->plen); } } else { if (ep->plen) memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep); } m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); if (m == NULL) { err = -ENOMEM; CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d", __func__, ep, err); free(mpa, M_CXGBE); goto err; } m_copyback(m, 0, mpalen, (void *)mpa); free(mpa, M_CXGBE); err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread); if (err) { CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d", __func__, ep, err); goto err; } START_EP_TIMER(ep); ep->com.state = MPA_REQ_SENT; ep->mpa_attr.initiator = 1; CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err); return 0; err: connect_reply_upcall(ep, err); CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err); return err; } static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) { int mpalen ; struct mpa_message *mpa; struct mpa_v2_conn_params mpa_v2_params; struct mbuf *m; int err; CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid, ep->plen); mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpalen += sizeof(struct mpa_v2_conn_params); CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep, ep->mpa_attr.version, mpalen); } mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); if (mpa == NULL) return (-ENOMEM); memset(mpa, 0, mpalen); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = MPA_REJECT; mpa->revision = mpa_rev; mpa->private_data_size = htons(plen); if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size = htons(ntohs(mpa->private_data_size) + sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons(((u16)ep->ird) | (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0)); mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE ? MPA_V2_RDMA_WRITE_RTR : p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ ? 
MPA_V2_RDMA_READ_RTR : 0) : 0)); memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), pdata, plen); CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep, mpa_v2_params.ird, mpa_v2_params.ord, ep->plen); } else if (plen) memcpy(mpa->private_data, pdata, plen); m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); if (m == NULL) { free(mpa, M_CXGBE); return (-ENOMEM); } m_copyback(m, 0, mpalen, (void *)mpa); free(mpa, M_CXGBE); err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread); if (!err) ep->snd_seq += mpalen; CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err); return err; } static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) { int mpalen; struct mpa_message *mpa; struct mbuf *m; struct mpa_v2_conn_params mpa_v2_params; int err; CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep); mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep, ep->mpa_attr.version); mpalen += sizeof(struct mpa_v2_conn_params); } mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); if (mpa == NULL) return (-ENOMEM); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | (markers_enabled ? MPA_MARKERS : 0); mpa->revision = ep->mpa_attr.version; mpa->private_data_size = htons(plen); if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size += htons(sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep, ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord); if (peer2peer && (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED)) { mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d", __func__, ep, p2p_type, mpa_v2_params.ird, mpa_v2_params.ord); } else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d", __func__, ep, p2p_type, mpa_v2_params.ird, mpa_v2_params.ord); } } memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), pdata, plen); } else if (plen) memcpy(mpa->private_data, pdata, plen); m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); if (m == NULL) { free(mpa, M_CXGBE); return (-ENOMEM); } m_copyback(m, 0, mpalen, (void *)mpa); free(mpa, M_CXGBE); ep->com.state = MPA_REP_SENT; ep->snd_seq += mpalen; err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread); CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err); return err; } static void close_complete_upcall(struct c4iw_ep *ep, int status) { struct iw_cm_event event; CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CLOSE; event.status = status; if (ep->com.cm_id) { CTR2(KTR_IW_CXGBE, "%s:ccu1 %1", __func__, ep); ep->com.cm_id->event_handler(ep->com.cm_id, &event); deref_cm_id(&ep->com); set_bit(CLOSE_UPCALL, &ep->com.history); } CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep); } static int send_abort(struct c4iw_ep *ep) { struct socket *so = ep->com.so; 
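send_abort(), whose body continues below, aborts the connection by arming SO_LINGER with a zero linger time before soclose(), so the TCP stack emits an RST instead of a graceful FIN. The same technique from userspace (illustrative only, not part of this change):

/* Illustrative: force an RST on close by setting a zero linger time. */
#include <sys/socket.h>
#include <unistd.h>

static void
abort_connection(int fd)
{
    struct linger l = { .l_onoff = 1, .l_linger = 0 };

    /* With l_linger == 0, close() discards unsent data and sends an RST. */
    (void)setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
    close(fd);
}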
struct sockopt sopt; int rc; struct linger l; CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so, states[ep->com.state], ep->hwtid); l.l_onoff = 1; l.l_linger = 0; /* linger_time of 0 forces RST to be sent */ sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_LINGER; sopt.sopt_val = (caddr_t)&l; sopt.sopt_valsize = sizeof l; sopt.sopt_td = NULL; rc = -sosetopt(so, &sopt); if (rc != 0) { log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n", __func__, so, rc); } uninit_iwarp_socket(so); soclose(so); set_bit(ABORT_CONN, &ep->com.history); /* * TBD: iw_cxgbe driver should receive ABORT reply for every ABORT * request it has sent. But the current TOE driver is not propagating * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a work- * around de-refererece 'ep' here instead of doing it in abort_rpl() * handler(not yet implemented) of iw_cxgbe driver. */ release_ep_resources(ep); ep->com.state = DEAD; return (0); } static void peer_close_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_DISCONNECT; if (ep->com.cm_id) { CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep); ep->com.cm_id->event_handler(ep->com.cm_id, &event); set_bit(DISCONN_UPCALL, &ep->com.history); } CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep); } static void peer_abort_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CLOSE; event.status = -ECONNRESET; if (ep->com.cm_id) { CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep); ep->com.cm_id->event_handler(ep->com.cm_id, &event); deref_cm_id(&ep->com); set_bit(ABORT_UPCALL, &ep->com.history); } CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep); } static void connect_reply_upcall(struct c4iw_ep *ep, int status) { struct iw_cm_event event; CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CONNECT_REPLY; event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ? 
-ECONNRESET : status; event.local_addr = ep->com.local_addr; event.remote_addr = ep->com.remote_addr; if ((status == 0) || (status == -ECONNREFUSED)) { if (!ep->tried_with_mpa_v1) { CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep); /* this means MPA_v2 is used */ event.ord = ep->ird; event.ird = ep->ord; event.private_data_len = ep->plen - sizeof(struct mpa_v2_conn_params); event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + sizeof(struct mpa_v2_conn_params); } else { CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep); /* this means MPA_v1 is used */ event.ord = c4iw_max_read_depth; event.ird = c4iw_max_read_depth; event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); } } if (ep->com.cm_id) { CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep); set_bit(CONN_RPL_UPCALL, &ep->com.history); ep->com.cm_id->event_handler(ep->com.cm_id, &event); } if(status == -ECONNABORTED) { CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status); return; } if (status < 0) { CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status); deref_cm_id(&ep->com); } CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep); } static int connect_request_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; int ret; CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep, ep->tried_with_mpa_v1); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CONNECT_REQUEST; event.local_addr = ep->com.local_addr; event.remote_addr = ep->com.remote_addr; event.provider_data = ep; if (!ep->tried_with_mpa_v1) { /* this means MPA_v2 is used */ event.ord = ep->ord; event.ird = ep->ird; event.private_data_len = ep->plen - sizeof(struct mpa_v2_conn_params); event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + sizeof(struct mpa_v2_conn_params); } else { /* this means MPA_v1 is used. Send max supported */ event.ord = c4iw_max_read_depth; event.ird = c4iw_max_read_depth; event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); } c4iw_get_ep(&ep->com); ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, &event); if(ret) { CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to" " IWCM, err:%d", __func__, ep, ret); c4iw_put_ep(&ep->com); } else /* Dereference parent_ep only in success case. * In case of failure, parent_ep is dereferenced by the caller * of process_mpa_request(). */ c4iw_put_ep(&ep->parent_ep->com); set_bit(CONNREQ_UPCALL, &ep->com.history); return ret; } static void established_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_ESTABLISHED; event.ird = ep->ord; event.ord = ep->ird; if (ep->com.cm_id) { CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep); ep->com.cm_id->event_handler(ep->com.cm_id, &event); set_bit(ESTAB_UPCALL, &ep->com.history); } CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep); } #define RELAXED_IRD_NEGOTIATION 1 /* * process_mpa_reply - process streaming mode MPA reply * * Returns: * * 0 upon success indicating a connect request was delivered to the ULP * or the mpa request is incomplete but valid so far. * * 1 if a failure requires the caller to close the connection. * * 2 if a failure requires the caller to abort the connection. 
*/ static int process_mpa_reply(struct c4iw_ep *ep) { struct mpa_message *mpa; struct mpa_v2_conn_params *mpa_v2_params; u16 plen; u16 resp_ird, resp_ord; u8 rtr_mismatch = 0, insuff_ird = 0; struct c4iw_qp_attributes attrs = {0}; enum c4iw_qp_attr_mask mask; int err; struct mbuf *top, *m; int flags = MSG_DONTWAIT; struct uio uio; int disconnect = 0; CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep); /* * Stop mpa timer. If it expired, then * we ignore the MPA reply. process_timeout() * will abort the connection. */ if (STOP_EP_TIMER(ep)) return 0; uio.uio_resid = 1000000; uio.uio_td = ep->com.thread; err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags); if (err) { if (err == EWOULDBLOCK) { CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep); START_EP_TIMER(ep); return 0; } err = -err; CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep); goto err; } if (ep->com.so->so_rcv.sb_mb) { CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep); printf("%s data after soreceive called! so %p sb_mb %p top %p\n", __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top); } m = top; do { CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep); /* * If we get more than the supported amount of private data * then we must fail this connection. */ if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) { CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep, ep->mpa_pkt_len + m->m_len); err = (-EINVAL); goto err_stop_timer; } /* * copy the new data into our accumulation buffer. */ m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len])); ep->mpa_pkt_len += m->m_len; if (!m->m_next) m = m->m_nextpkt; else m = m->m_next; } while (m); m_freem(top); /* * if we don't even have the mpa message, then bail. */ if (ep->mpa_pkt_len < sizeof(*mpa)) { return 0; } mpa = (struct mpa_message *) ep->mpa_pkt; /* Validate MPA header. */ if (mpa->revision > mpa_rev) { CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep, mpa->revision, mpa_rev); printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, " " Received = %d\n", __func__, mpa_rev, mpa->revision); err = -EPROTO; goto err_stop_timer; } if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep); err = -EPROTO; goto err_stop_timer; } plen = ntohs(mpa->private_data_size); /* * Fail if there's too much private data. */ if (plen > MPA_MAX_PRIVATE_DATA) { CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep); err = -EPROTO; goto err_stop_timer; } /* * If plen does not account for pkt size */ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep); STOP_EP_TIMER(ep); err = -EPROTO; goto err_stop_timer; } ep->plen = (u8) plen; /* * If we don't have all the pdata yet, then bail. * We'll continue process when more data arrives. */ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) { CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep); return 0; } if (mpa->flags & MPA_REJECT) { CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep); err = -ECONNREFUSED; goto err_stop_timer; } /* * If we get here we have accumulated the entire mpa * start reply message including private data. And * the MPA header is valid. */ ep->com.state = FPDU_MODE; ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; ep->mpa_attr.recv_marker_enabled = markers_enabled; ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 
1 : 0; ep->mpa_attr.version = mpa->revision; ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; if (mpa->revision == 2) { CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep); ep->mpa_attr.enhanced_rdma_conn = mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; if (ep->mpa_attr.enhanced_rdma_conn) { CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep); mpa_v2_params = (struct mpa_v2_conn_params *) (ep->mpa_pkt + sizeof(*mpa)); resp_ird = ntohs(mpa_v2_params->ird) & MPA_V2_IRD_ORD_MASK; resp_ord = ntohs(mpa_v2_params->ord) & MPA_V2_IRD_ORD_MASK; /* * This is a double-check. Ideally, below checks are * not required since ird/ord stuff has been taken * care of in c4iw_accept_cr */ if (ep->ird < resp_ord) { if (RELAXED_IRD_NEGOTIATION && resp_ord <= ep->com.dev->rdev.adap->params.max_ordird_qp) ep->ird = resp_ord; else insuff_ird = 1; } else if (ep->ird > resp_ord) { ep->ird = resp_ord; } if (ep->ord > resp_ird) { if (RELAXED_IRD_NEGOTIATION) ep->ord = resp_ird; else insuff_ird = 1; } if (insuff_ird) { err = -ENOMEM; ep->ird = resp_ord; ep->ord = resp_ird; } if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) { CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep); if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_WRITE_RTR) { CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep); ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_RDMA_WRITE; } else if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_READ_RTR) { CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep); ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; } } } } else { CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep); if (mpa->revision == 1) { CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep); if (peer2peer) { CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep); ep->mpa_attr.p2p_type = p2p_type; } } } if (set_tcpinfo(ep)) { CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep); printf("%s set_tcpinfo error\n", __func__); err = -ECONNRESET; goto err; } CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, " "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__, ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, ep->mpa_attr.p2p_type); /* * If responder's RTR does not match with that of initiator, assign * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not * generated when moving QP to RTS state. 
* A TERM message will be sent after QP has moved to RTS state */ if ((ep->mpa_attr.version == 2) && peer2peer && (ep->mpa_attr.p2p_type != p2p_type)) { CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep); ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; rtr_mismatch = 1; } //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; attrs.mpa_attr = ep->mpa_attr; attrs.max_ird = ep->ird; attrs.max_ord = ep->ord; attrs.llp_stream_handle = ep; attrs.next_state = C4IW_QP_STATE_RTS; mask = C4IW_QP_ATTR_NEXT_STATE | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; /* bind QP and TID with INIT_WR */ err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); if (err) { CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep); goto err; } /* * If responder's RTR requirement did not match with what initiator * supports, generate TERM message */ if (rtr_mismatch) { CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep); printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_NOMATCH_RTR; attrs.next_state = C4IW_QP_STATE_TERMINATE; attrs.send_term = 1; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); err = -ENOMEM; disconnect = 1; goto out; } /* * Generate TERM if initiator IRD is not sufficient for responder * provided ORD. Currently, we do the same behaviour even when * responder provided IRD is also not sufficient as regards to * initiator ORD. */ if (insuff_ird) { CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep); printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", __func__); attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_INSUFF_IRD; attrs.next_state = C4IW_QP_STATE_TERMINATE; attrs.send_term = 1; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); err = -ENOMEM; disconnect = 1; goto out; } goto out; err_stop_timer: STOP_EP_TIMER(ep); err: disconnect = 2; out: connect_reply_upcall(ep, err); CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep); return disconnect; } /* * process_mpa_request - process streaming mode MPA request * * Returns: * * 0 upon success indicating a connect request was delivered to the ULP * or the mpa request is incomplete but valid so far. * * 1 if a failure requires the caller to close the connection. * * 2 if a failure requires the caller to abort the connection. */ static int process_mpa_request(struct c4iw_ep *ep) { struct mpa_message *mpa; struct mpa_v2_conn_params *mpa_v2_params; u16 plen; int flags = MSG_DONTWAIT; int rc; struct iovec iov; struct uio uio; enum c4iw_ep_state state = ep->com.state; CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]); if (state != MPA_REQ_WAIT) return 0; iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len]; iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_offset = 0; uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = UIO_READ; uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */ rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags); if (rc == EAGAIN) return 0; else if (rc) goto err_stop_timer; KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data", __func__, ep->com.so)); ep->mpa_pkt_len += uio.uio_offset; /* * If we get more than the supported amount of private data then we must * fail this connection. 
XXX: check so_rcv->sb_cc, or peek with another * soreceive, or increase the size of mpa_pkt by 1 and abort if the last * byte is filled by the soreceive above. */ /* Don't even have the MPA message. Wait for more data to arrive. */ if (ep->mpa_pkt_len < sizeof(*mpa)) return 0; mpa = (struct mpa_message *) ep->mpa_pkt; /* * Validate MPA Header. */ if (mpa->revision > mpa_rev) { log(LOG_ERR, "%s: MPA version mismatch. Local = %d," " Received = %d\n", __func__, mpa_rev, mpa->revision); goto err_stop_timer; } if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) goto err_stop_timer; /* * Fail if there's too much private data. */ plen = ntohs(mpa->private_data_size); if (plen > MPA_MAX_PRIVATE_DATA) goto err_stop_timer; /* * If plen does not account for pkt size */ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) goto err_stop_timer; ep->plen = (u8) plen; /* * If we don't have all the pdata yet, then bail. */ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) return 0; /* * If we get here we have accumulated the entire mpa * start reply message including private data. */ ep->mpa_attr.initiator = 0; ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; ep->mpa_attr.recv_marker_enabled = markers_enabled; ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; ep->mpa_attr.version = mpa->revision; if (mpa->revision == 1) ep->tried_with_mpa_v1 = 1; ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; if (mpa->revision == 2) { ep->mpa_attr.enhanced_rdma_conn = mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; if (ep->mpa_attr.enhanced_rdma_conn) { mpa_v2_params = (struct mpa_v2_conn_params *) (ep->mpa_pkt + sizeof(*mpa)); ep->ird = ntohs(mpa_v2_params->ird) & MPA_V2_IRD_ORD_MASK; ep->ird = min_t(u32, ep->ird, cur_max_read_depth(ep->com.dev)); ep->ord = ntohs(mpa_v2_params->ord) & MPA_V2_IRD_ORD_MASK; ep->ord = min_t(u32, ep->ord, cur_max_read_depth(ep->com.dev)); CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u", __func__, ep->ird, ep->ord); if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) if (peer2peer) { if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_WRITE_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_RDMA_WRITE; else if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_READ_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; } } } else if (mpa->revision == 1 && peer2peer) ep->mpa_attr.p2p_type = p2p_type; if (set_tcpinfo(ep)) goto err_stop_timer; CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, " "xmit_marker_enabled = %d, version = %d", __func__, ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); ep->com.state = MPA_REQ_RCVD; STOP_EP_TIMER(ep); /* drive upcall */ if (ep->parent_ep->com.state != DEAD) if (connect_request_upcall(ep)) goto err_out; return 0; err_stop_timer: STOP_EP_TIMER(ep); err_out: return 2; } /* * Upcall from the adapter indicating data has been transmitted. * For us its just the single MPA request or reply. We can now free * the skb holding the mpa message. 
*/ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { #ifdef KTR int err; #endif struct c4iw_ep *ep = to_ep(cm_id); int abort = 0; mutex_lock(&ep->com.mutex); CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep); if ((ep->com.state == DEAD) || (ep->com.state != MPA_REQ_RCVD)) { CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep); mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); return -ECONNRESET; } set_bit(ULP_REJECT, &ep->com.history); if (mpa_rev == 0) { CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep); abort = 1; } else { CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep); abort = send_mpa_reject(ep, pdata, pdata_len); } STOP_EP_TIMER(ep); #ifdef KTR err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); #else c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); #endif mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err); return 0; } int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { int err; struct c4iw_qp_attributes attrs = {0}; enum c4iw_qp_attr_mask mask; struct c4iw_ep *ep = to_ep(cm_id); struct c4iw_dev *h = to_c4iw_dev(cm_id->device); struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); int abort = 0; mutex_lock(&ep->com.mutex); CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep); if ((ep->com.state == DEAD) || (ep->com.state != MPA_REQ_RCVD)) { CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep); err = -ECONNRESET; goto err_out; } BUG_ON(!qp); set_bit(ULP_ACCEPT, &ep->com.history); if ((conn_param->ord > c4iw_max_read_depth) || (conn_param->ird > c4iw_max_read_depth)) { CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep); err = -EINVAL; goto err_abort; } if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep); if (conn_param->ord > ep->ird) { if (RELAXED_IRD_NEGOTIATION) { conn_param->ord = ep->ird; } else { ep->ird = conn_param->ird; ep->ord = conn_param->ord; send_mpa_reject(ep, conn_param->private_data, conn_param->private_data_len); err = -ENOMEM; goto err_abort; } } if (conn_param->ird < ep->ord) { if (RELAXED_IRD_NEGOTIATION && ep->ord <= h->rdev.adap->params.max_ordird_qp) { conn_param->ird = ep->ord; } else { err = -ENOMEM; goto err_abort; } } } ep->ird = conn_param->ird; ep->ord = conn_param->ord; if (ep->mpa_attr.version == 1) { if (peer2peer && ep->ird == 0) ep->ird = 1; } else { if (peer2peer && (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) ep->ird = 1; } CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__, ep->ird, ep->ord); ep->com.cm_id = cm_id; ref_cm_id(&ep->com); ep->com.qp = qp; ref_qp(ep); //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; /* bind QP to EP and move to RTS */ attrs.mpa_attr = ep->mpa_attr; attrs.max_ird = ep->ird; attrs.max_ord = ep->ord; attrs.llp_stream_handle = ep; attrs.next_state = C4IW_QP_STATE_RTS; /* bind QP and TID with INIT_WR */ mask = C4IW_QP_ATTR_NEXT_STATE | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); if (err) { CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err); goto err_defef_cm_id; } err = send_mpa_reply(ep, conn_param->private_data, conn_param->private_data_len); if (err) { CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err); goto err_defef_cm_id; } ep->com.state = FPDU_MODE; established_upcall(ep); mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); CTR2(KTR_IW_CXGBE, 
"%s:cacE %p", __func__, ep); return 0; err_defef_cm_id: deref_cm_id(&ep->com); err_abort: abort = 1; err_out: if (abort) c4iw_ep_disconnect(ep, 1, GFP_KERNEL); mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep); return err; } static int c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so) { int ret; int size, on; struct socket *sock = NULL; struct sockopt sopt; ret = sock_create_kern(laddr->ss_family, SOCK_STREAM, IPPROTO_TCP, &sock); if (ret) { CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d", __func__, ret); return ret; } if (reuseaddr) { bzero(&sopt, sizeof(struct sockopt)); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_REUSEADDR; on = 1; sopt.sopt_val = &on; sopt.sopt_valsize = sizeof(on); ret = -sosetopt(sock, &sopt); if (ret != 0) { log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) " "failed with %d.\n", __func__, sock, ret); } bzero(&sopt, sizeof(struct sockopt)); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_REUSEPORT; on = 1; sopt.sopt_val = &on; sopt.sopt_valsize = sizeof(on); ret = -sosetopt(sock, &sopt); if (ret != 0) { log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) " "failed with %d.\n", __func__, sock, ret); } } ret = -sobind(sock, (struct sockaddr *)laddr, curthread); if (ret) { CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %p", __func__, ret); sock_release(sock); return ret; } size = laddr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in); ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0); if (ret) { CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %p", __func__, ret); sock_release(sock); return ret; } *so = sock; return 0; } int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { int err = 0; struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_ep *ep = NULL; if_t nh_ifp; /* Logical egress interface */ struct epoch_tracker et; #ifdef VIMAGE struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context; struct vnet *vnet = rdma_id->route.addr.dev_addr.net; #endif CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id); if ((conn_param->ord > c4iw_max_read_depth) || (conn_param->ird > c4iw_max_read_depth)) { CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id); err = -EINVAL; goto out; } ep = alloc_ep(sizeof(*ep), GFP_KERNEL); cm_id->provider_data = ep; init_timer(&ep->timer); ep->plen = conn_param->private_data_len; if (ep->plen) { CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep); memcpy(ep->mpa_pkt + sizeof(struct mpa_message), conn_param->private_data, ep->plen); } ep->ird = conn_param->ird; ep->ord = conn_param->ord; if (peer2peer && ep->ord == 0) { CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep); ep->ord = 1; } ep->com.dev = dev; ep->com.cm_id = cm_id; ref_cm_id(&ep->com); ep->com.qp = get_qhp(dev, conn_param->qpn); if (!ep->com.qp) { CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep); err = -EINVAL; goto fail; } ref_qp(ep); ep->com.thread = curthread; NET_EPOCH_ENTER(et); CURVNET_SET(vnet); err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp); CURVNET_RESTORE(); NET_EPOCH_EXIT(et); if (err) { CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep); printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); err = EHOSTUNREACH; return err; } if (!(if_getcapenable(nh_ifp) & IFCAP_TOE) || TOEDEV(nh_ifp) == NULL) { err = -ENOPROTOOPT; goto fail; } ep->com.state = CONNECTING; ep->tos = 0; ep->com.local_addr = cm_id->local_addr; ep->com.remote_addr = cm_id->remote_addr; err = 
c4iw_sock_create(&cm_id->local_addr, &ep->com.so); if (err) goto fail; setiwsockopt(ep->com.so); init_iwarp_socket(ep->com.so, &ep->com); err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr, ep->com.thread); if (err) goto fail_free_so; CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep); return 0; fail_free_so: uninit_iwarp_socket(ep->com.so); ep->com.state = DEAD; sock_release(ep->com.so); fail: deref_cm_id(&ep->com); c4iw_put_ep(&ep->com); ep = NULL; out: CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err); return err; } /* * iwcm->create_listen. Returns -errno on failure. */ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) { struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_listen_ep *lep = NULL; struct listen_port_info *port_info = NULL; int rc = 0; CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %s", __func__, cm_id, backlog); if (c4iw_fatal_error(&dev->rdev)) { CTR2(KTR_IW_CXGBE, "%s: cm_id %p, fatal error", __func__, cm_id); return -EIO; } lep = alloc_ep(sizeof(*lep), GFP_KERNEL); lep->com.cm_id = cm_id; ref_cm_id(&lep->com); lep->com.dev = dev; lep->backlog = backlog; lep->com.local_addr = cm_id->local_addr; lep->com.thread = curthread; cm_id->provider_data = lep; lep->com.state = LISTEN; /* In case of INDADDR_ANY, ibcore creates cmid for each device and * invokes iw_cxgbe listener callbacks assuming that iw_cxgbe creates * HW listeners for each device seperately. But toecore expects single * solisten() call with INADDR_ANY address to create HW listeners on * all devices for a given port number. So iw_cxgbe driver calls * solisten() only once for INADDR_ANY(usually done at first time * listener callback from ibcore). And all the subsequent INADDR_ANY * listener callbacks from ibcore(for the same port address) do not * invoke solisten() as first listener callback has already created * listeners for all other devices(via solisten). */ if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr, NULL)) { port_info = add_ep_to_listenlist(lep); /* skip solisten() if refcnt > 1, as the listeners were * already created by 'Master lep' */ if (port_info->refcnt > 1) { /* As there will be only one listener socket for a TCP * port, copy Master lep's socket pointer to other lep's * that are belonging to same TCP port. */ struct c4iw_listen_ep *head_lep = container_of(port_info->lep_list.next, struct c4iw_listen_ep, listen_ep_list); lep->com.so = head_lep->com.so; goto out; } } rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so); if (rc) { CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d", __func__, rc); goto fail; } rc = -solisten(lep->com.so, backlog, curthread); if (rc) { CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. 
err %d", __func__, lep->com.so, rc); goto fail_free_so; } init_iwarp_socket(lep->com.so, &lep->com); out: return 0; fail_free_so: sock_release(lep->com.so); fail: if (port_info) rem_ep_from_listenlist(lep); deref_cm_id(&lep->com); c4iw_put_ep(&lep->com); return rc; } int c4iw_destroy_listen(struct iw_cm_id *cm_id) { struct c4iw_listen_ep *lep = to_listen_ep(cm_id); mutex_lock(&lep->com.mutex); CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id, states[lep->com.state]); lep->com.state = DEAD; if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr, lep->com.so->so_vnet)) { /* if no refcount then close listen socket */ if (!rem_ep_from_listenlist(lep)) close_socket(lep->com.so); } else close_socket(lep->com.so); deref_cm_id(&lep->com); mutex_unlock(&lep->com.mutex); c4iw_put_ep(&lep->com); return 0; } int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) { int ret; mutex_lock(&ep->com.mutex); ret = c4iw_ep_disconnect(ep, abrupt, gfp); mutex_unlock(&ep->com.mutex); return ret; } int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) { int ret = 0; int close = 0; struct c4iw_rdev *rdev; CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep); rdev = &ep->com.dev->rdev; if (c4iw_fatal_error(rdev)) { CTR3(KTR_IW_CXGBE, "%s:ced1 fatal error %p %s", __func__, ep, states[ep->com.state]); if (ep->com.state != DEAD) { send_abort(ep); ep->com.state = DEAD; } close_complete_upcall(ep, -ECONNRESET); return ECONNRESET; } CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep, states[ep->com.state]); /* * Ref the ep here in case we have fatal errors causing the * ep to be released and freed. */ c4iw_get_ep(&ep->com); switch (ep->com.state) { case MPA_REQ_WAIT: case MPA_REQ_SENT: case MPA_REQ_RCVD: case MPA_REP_SENT: case FPDU_MODE: close = 1; if (abrupt) ep->com.state = ABORTING; else { ep->com.state = CLOSING; START_EP_TIMER(ep); } set_bit(CLOSE_SENT, &ep->com.flags); break; case CLOSING: if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { close = 1; if (abrupt) { STOP_EP_TIMER(ep); ep->com.state = ABORTING; } else ep->com.state = MORIBUND; } break; case MORIBUND: case ABORTING: case DEAD: CTR3(KTR_IW_CXGBE, "%s ignoring disconnect ep %p state %u", __func__, ep, ep->com.state); break; default: BUG(); break; } if (close) { CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep); if (abrupt) { CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep); set_bit(EP_DISC_ABORT, &ep->com.history); close_complete_upcall(ep, -ECONNRESET); send_abort(ep); } else { CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep); set_bit(EP_DISC_CLOSE, &ep->com.history); if (!ep->parent_ep) ep->com.state = MORIBUND; CURVNET_SET(ep->com.so->so_vnet); ret = sodisconnect(ep->com.so); CURVNET_RESTORE(); if (ret) { CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep); STOP_EP_TIMER(ep); send_abort(ep); ep->com.state = DEAD; close_complete_upcall(ep, -ECONNRESET); set_bit(EP_DISC_FAIL, &ep->com.history); if (ep->com.qp) { struct c4iw_qp_attributes attrs = {0}; attrs.next_state = C4IW_QP_STATE_ERROR; ret = c4iw_modify_qp( ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); CTR3(KTR_IW_CXGBE, "%s:ced7 %p ret %d", __func__, ep, ret); } } } } c4iw_put_ep(&ep->com); CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep); return ret; } #ifdef C4IW_EP_REDIRECT int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t) { struct c4iw_ep *ep = ctx; if (ep->dst != old) return 0; PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, l2t); dst_hold(new); cxgb4_l2t_release(ep->l2t); ep->l2t = l2t; 
dst_release(old); ep->dst = new; return 1; } #endif static void ep_timeout(unsigned long arg) { struct c4iw_ep *ep = (struct c4iw_ep *)arg; if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { /* * Only insert if it is not already on the list. */ if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) { CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep); add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT); } } } static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl) { uint64_t val = be64toh(*rpl); int ret; struct c4iw_wr_wait *wr_waitp; ret = (int)((val >> 8) & 0xff); wr_waitp = (struct c4iw_wr_wait *)rpl[1]; CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret); if (wr_waitp) c4iw_wake_up(wr_waitp, ret ? -ret : 0); return (0); } static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl) { struct cqe_list_entry *cle; unsigned long flag; cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT); cle->rhp = sc->iwarp_softc; cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]); spin_lock_irqsave(&err_cqe_lock, flag); list_add_tail(&cle->entry, &err_cqe_list); queue_work(c4iw_taskq, &c4iw_task); spin_unlock_irqrestore(&err_cqe_lock, flag); return (0); } static int process_terminate(struct c4iw_ep *ep) { struct c4iw_qp_attributes attrs = {0}; CTR2(KTR_IW_CXGBE, "%s:tB %p %d", __func__, ep); if (ep && ep->com.qp) { printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", ep->hwtid, ep->com.qp->wq.sq.qid); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } else printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", ep->hwtid); CTR2(KTR_IW_CXGBE, "%s:tE %p %d", __func__, ep); return 0; } int __init c4iw_cm_init(void) { t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate); t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl); t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler); t4_register_an_handler(c4iw_ev_handler); TAILQ_INIT(&req_list); spin_lock_init(&req_lock); INIT_LIST_HEAD(&err_cqe_list); spin_lock_init(&err_cqe_lock); INIT_WORK(&c4iw_task, process_req); c4iw_taskq = create_singlethread_workqueue("iw_cxgbe"); if (!c4iw_taskq) return -ENOMEM; return 0; } void __exit c4iw_cm_term(void) { WARN_ON(!TAILQ_EMPTY(&req_list)); WARN_ON(!list_empty(&err_cqe_list)); flush_workqueue(c4iw_taskq); destroy_workqueue(c4iw_taskq); t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL); t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL); t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL); t4_register_an_handler(NULL); } #endif diff --git a/sys/dev/hyperv/hvsock/hv_sock.c b/sys/dev/hyperv/hvsock/hv_sock.c index 655cc990876e..78ec520f1bb3 100644 --- a/sys/dev/hyperv/hvsock/hv_sock.c +++ b/sys/dev/hyperv/hvsock/hv_sock.c @@ -1,1741 +1,1741 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_sock.h" #define HVSOCK_DBG_NONE 0x0 #define HVSOCK_DBG_INFO 0x1 #define HVSOCK_DBG_ERR 0x2 #define HVSOCK_DBG_VERBOSE 0x3 SYSCTL_NODE(_net, OID_AUTO, hvsock, CTLFLAG_RD, 0, "HyperV socket"); static int hvs_dbg_level; SYSCTL_INT(_net_hvsock, OID_AUTO, hvs_dbg_level, CTLFLAG_RWTUN, &hvs_dbg_level, 0, "hyperv socket debug level: 0 = none, 1 = info, 2 = error, 3 = verbose"); #define HVSOCK_DBG(level, ...) do { \ if (hvs_dbg_level >= (level)) \ printf(__VA_ARGS__); \ } while (0) MALLOC_DEFINE(M_HVSOCK, "hyperv_socket", "hyperv socket control structures"); static int hvs_dom_probe(void); /* The MTU is 16KB per host side's design */ #define HVSOCK_MTU_SIZE (1024 * 16) #define HVSOCK_SEND_BUF_SZ (PAGE_SIZE - sizeof(struct vmpipe_proto_header)) #define HVSOCK_HEADER_LEN (sizeof(struct hvs_pkt_header)) #define HVSOCK_PKT_LEN(payload_len) (HVSOCK_HEADER_LEN + \ roundup2(payload_len, 8) + \ sizeof(uint64_t)) /* * HyperV Transport sockets */ static struct protosw hv_socket_protosw = { .pr_type = SOCK_STREAM, .pr_protocol = HYPERV_SOCK_PROTO_TRANS, .pr_flags = PR_CONNREQUIRED, .pr_attach = hvs_trans_attach, .pr_bind = hvs_trans_bind, .pr_listen = hvs_trans_listen, .pr_accept = hvs_trans_accept, .pr_connect = hvs_trans_connect, .pr_peeraddr = hvs_trans_peeraddr, .pr_sockaddr = hvs_trans_sockaddr, .pr_soreceive = hvs_trans_soreceive, .pr_sosend = hvs_trans_sosend, .pr_disconnect = hvs_trans_disconnect, .pr_close = hvs_trans_close, .pr_detach = hvs_trans_detach, .pr_shutdown = hvs_trans_shutdown, .pr_abort = hvs_trans_abort, }; static struct domain hv_socket_domain = { .dom_family = AF_HYPERV, .dom_name = "hyperv", .dom_probe = hvs_dom_probe, .dom_nprotosw = 1, .dom_protosw = { &hv_socket_protosw }, }; DOMAIN_SET(hv_socket_); #define MAX_PORT ((uint32_t)0xFFFFFFFF) #define MIN_PORT ((uint32_t)0x0) /* 00000000-facb-11e6-bd58-64006a7986d3 */ static const struct hyperv_guid srv_id_template = { .hv_guid = { 0x00, 0x00, 0x00, 0x00, 0xcb, 0xfa, 0xe6, 0x11, 0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3 } }; static int hvsock_br_callback(void *, int, void *); static uint32_t hvsock_canread_check(struct hvs_pcb *); static uint32_t hvsock_canwrite_check(struct hvs_pcb *); static int hvsock_send_data(struct vmbus_channel *chan, struct uio *uio, uint32_t to_write, struct sockbuf *sb); /* Globals */ static struct sx hvs_trans_socks_sx; static struct mtx hvs_trans_socks_mtx; static LIST_HEAD(, hvs_pcb) hvs_trans_bound_socks; static LIST_HEAD(, hvs_pcb) hvs_trans_connected_socks; static uint32_t previous_auto_bound_port; static void hvsock_print_guid(struct hyperv_guid *guid) { unsigned char *p = (unsigned char 
*)guid; HVSOCK_DBG(HVSOCK_DBG_INFO, "0x%x-0x%x-0x%x-0x%x-0x%x-0x%x-0x%x-0x%x-0x%x-0x%x-0x%x\n", *(unsigned int *)p, *((unsigned short *) &p[4]), *((unsigned short *) &p[6]), p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); } static bool is_valid_srv_id(const struct hyperv_guid *id) { return !memcmp(&id->hv_guid[4], &srv_id_template.hv_guid[4], sizeof(struct hyperv_guid) - 4); } static unsigned int get_port_by_srv_id(const struct hyperv_guid *srv_id) { return *((const unsigned int *)srv_id); } static void set_port_by_srv_id(struct hyperv_guid *srv_id, unsigned int port) { *((unsigned int *)srv_id) = port; } static void __hvs_remove_pcb_from_list(struct hvs_pcb *pcb, unsigned char list) { struct hvs_pcb *p = NULL; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: pcb is %p\n", __func__, pcb); if (!pcb) return; if (list & HVS_LIST_BOUND) { LIST_FOREACH(p, &hvs_trans_bound_socks, bound_next) if (p == pcb) LIST_REMOVE(p, bound_next); } if (list & HVS_LIST_CONNECTED) { LIST_FOREACH(p, &hvs_trans_connected_socks, connected_next) if (p == pcb) LIST_REMOVE(pcb, connected_next); } } static void __hvs_remove_socket_from_list(struct socket *so, unsigned char list) { struct hvs_pcb *pcb = so2hvspcb(so); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: pcb is %p\n", __func__, pcb); __hvs_remove_pcb_from_list(pcb, list); } static void __hvs_insert_socket_on_list(struct socket *so, unsigned char list) { struct hvs_pcb *pcb = so2hvspcb(so); if (list & HVS_LIST_BOUND) LIST_INSERT_HEAD(&hvs_trans_bound_socks, pcb, bound_next); if (list & HVS_LIST_CONNECTED) LIST_INSERT_HEAD(&hvs_trans_connected_socks, pcb, connected_next); } void hvs_remove_socket_from_list(struct socket *so, unsigned char list) { if (!so || !so->so_pcb) { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: socket or so_pcb is null\n", __func__); return; } mtx_lock(&hvs_trans_socks_mtx); __hvs_remove_socket_from_list(so, list); mtx_unlock(&hvs_trans_socks_mtx); } static void hvs_insert_socket_on_list(struct socket *so, unsigned char list) { if (!so || !so->so_pcb) { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: socket or so_pcb is null\n", __func__); return; } mtx_lock(&hvs_trans_socks_mtx); __hvs_insert_socket_on_list(so, list); mtx_unlock(&hvs_trans_socks_mtx); } static struct socket * __hvs_find_socket_on_list(struct sockaddr_hvs *addr, unsigned char list) { struct hvs_pcb *p = NULL; if (list & HVS_LIST_BOUND) LIST_FOREACH(p, &hvs_trans_bound_socks, bound_next) if (p->so != NULL && addr->hvs_port == p->local_addr.hvs_port) return p->so; if (list & HVS_LIST_CONNECTED) LIST_FOREACH(p, &hvs_trans_connected_socks, connected_next) if (p->so != NULL && addr->hvs_port == p->local_addr.hvs_port) return p->so; return NULL; } static struct socket * hvs_find_socket_on_list(struct sockaddr_hvs *addr, unsigned char list) { struct socket *s = NULL; mtx_lock(&hvs_trans_socks_mtx); s = __hvs_find_socket_on_list(addr, list); mtx_unlock(&hvs_trans_socks_mtx); return s; } static inline void hvs_addr_set(struct sockaddr_hvs *addr, unsigned int port) { memset(addr, 0, sizeof(*addr)); addr->sa_family = AF_HYPERV; addr->sa_len = sizeof(*addr); addr->hvs_port = port; } void hvs_addr_init(struct sockaddr_hvs *addr, const struct hyperv_guid *svr_id) { hvs_addr_set(addr, get_port_by_srv_id(svr_id)); } int hvs_trans_lock(void) { sx_xlock(&hvs_trans_socks_sx); return (0); } void hvs_trans_unlock(void) { sx_xunlock(&hvs_trans_socks_sx); } static int hvs_dom_probe(void) { /* Don't even give us a chance to attach on non-HyperV. 
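 * vm_guest is initialized early in boot from hypervisor detection; returning
 * ENXIO here keeps the AF_HYPERV domain from being registered on anything
 * other than a Hyper-V guest.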
*/ if (vm_guest != VM_GUEST_HV) return (ENXIO); return (0); } static void hvs_trans_init(void *arg __unused) { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_init called\n", __func__); /* Initialize Globals */ previous_auto_bound_port = MAX_PORT; sx_init(&hvs_trans_socks_sx, "hvs_trans_sock_sx"); mtx_init(&hvs_trans_socks_mtx, "hvs_trans_socks_mtx", NULL, MTX_DEF); LIST_INIT(&hvs_trans_bound_socks); LIST_INIT(&hvs_trans_connected_socks); } SYSINIT(hvs_trans_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, hvs_trans_init, NULL); /* * Called in two cases: * 1) When user calls socket(); * 2) When we accept new incoming conneciton and call sonewconn(). */ int hvs_trans_attach(struct socket *so, int proto, struct thread *td) { struct hvs_pcb *pcb = so2hvspcb(so); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_attach called\n", __func__); if (so->so_type != SOCK_STREAM) return (ESOCKTNOSUPPORT); if (proto != 0 && proto != HYPERV_SOCK_PROTO_TRANS) return (EPROTONOSUPPORT); if (pcb != NULL) return (EISCONN); pcb = malloc(sizeof(struct hvs_pcb), M_HVSOCK, M_NOWAIT | M_ZERO); if (pcb == NULL) return (ENOMEM); pcb->so = so; so->so_pcb = (void *)pcb; return (0); } void hvs_trans_detach(struct socket *so) { struct hvs_pcb *pcb; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_detach called\n", __func__); (void) hvs_trans_lock(); pcb = so2hvspcb(so); if (pcb == NULL) { hvs_trans_unlock(); return; } if (SOLISTENING(so)) { bzero(pcb, sizeof(*pcb)); free(pcb, M_HVSOCK); } so->so_pcb = NULL; hvs_trans_unlock(); } int hvs_trans_bind(struct socket *so, struct sockaddr *addr, struct thread *td) { struct hvs_pcb *pcb = so2hvspcb(so); struct sockaddr_hvs *sa = (struct sockaddr_hvs *) addr; int error = 0; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_bind called\n", __func__); if (sa == NULL) { return (EINVAL); } if (pcb == NULL) { return (EINVAL); } if (sa->sa_family != AF_HYPERV) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: Not supported, sa_family is %u\n", __func__, sa->sa_family); return (EAFNOSUPPORT); } if (sa->sa_len != sizeof(*sa)) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: Not supported, sa_len is %u\n", __func__, sa->sa_len); return (EINVAL); } HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: binding port = 0x%x\n", __func__, sa->hvs_port); mtx_lock(&hvs_trans_socks_mtx); if (__hvs_find_socket_on_list(sa, HVS_LIST_BOUND | HVS_LIST_CONNECTED)) { error = EADDRINUSE; } else { /* * The address is available for us to bind. * Add socket to the bound list. */ hvs_addr_set(&pcb->local_addr, sa->hvs_port); hvs_addr_set(&pcb->remote_addr, HVADDR_PORT_ANY); __hvs_insert_socket_on_list(so, HVS_LIST_BOUND); } mtx_unlock(&hvs_trans_socks_mtx); return (error); } int hvs_trans_listen(struct socket *so, int backlog, struct thread *td) { struct hvs_pcb *pcb = so2hvspcb(so); struct socket *bound_so; int error; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_listen called\n", __func__); if (pcb == NULL) return (EINVAL); /* Check if the address is already bound and it was by us. 
*/ bound_so = hvs_find_socket_on_list(&pcb->local_addr, HVS_LIST_BOUND); if (bound_so == NULL || bound_so != so) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: Address not bound or not by us.\n", __func__); return (EADDRNOTAVAIL); } SOCK_LOCK(so); error = solisten_proto_check(so); if (error == 0) solisten_proto(so, backlog); SOCK_UNLOCK(so); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket listen error = %d\n", __func__, error); return (error); } int hvs_trans_accept(struct socket *so, struct sockaddr *sa) { struct hvs_pcb *pcb = so2hvspcb(so); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_accept called\n", __func__); if (pcb == NULL) return (EINVAL); memcpy(sa, &pcb->remote_addr, pcb->remote_addr.sa_len); return (0); } int hvs_trans_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { struct hvs_pcb *pcb = so2hvspcb(so); struct sockaddr_hvs *raddr = (struct sockaddr_hvs *)nam; bool found_auto_bound_port = false; int i, error = 0; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_connect called, remote port is %x\n", __func__, raddr->hvs_port); if (pcb == NULL) return (EINVAL); /* Verify the remote address */ if (raddr == NULL) return (EINVAL); if (raddr->sa_family != AF_HYPERV) return (EAFNOSUPPORT); if (raddr->sa_len != sizeof(*raddr)) return (EINVAL); mtx_lock(&hvs_trans_socks_mtx); if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTING|SS_ISCONNECTING)) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: socket connect in progress\n", __func__); error = EINPROGRESS; goto out; } /* * Find an available port for us to auto bind the local * address. */ hvs_addr_set(&pcb->local_addr, 0); for (i = previous_auto_bound_port - 1; i != previous_auto_bound_port; i --) { if (i == MIN_PORT) i = MAX_PORT; pcb->local_addr.hvs_port = i; if (__hvs_find_socket_on_list(&pcb->local_addr, HVS_LIST_BOUND | HVS_LIST_CONNECTED) == NULL) { found_auto_bound_port = true; previous_auto_bound_port = i; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: found local bound port is %x\n", __func__, pcb->local_addr.hvs_port); break; } } if (found_auto_bound_port == true) { /* Found available port for auto bound, put on list */ __hvs_insert_socket_on_list(so, HVS_LIST_BOUND); /* Set VM service ID */ pcb->vm_srv_id = srv_id_template; set_port_by_srv_id(&pcb->vm_srv_id, pcb->local_addr.hvs_port); /* Set host service ID and remote port */ pcb->host_srv_id = srv_id_template; set_port_by_srv_id(&pcb->host_srv_id, raddr->hvs_port); hvs_addr_set(&pcb->remote_addr, raddr->hvs_port); /* Change the socket state to SS_ISCONNECTING */ soisconnecting(so); } else { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: No local port available for auto bound\n", __func__); error = EADDRINUSE; } HVSOCK_DBG(HVSOCK_DBG_INFO, "Connect vm_srv_id is "); hvsock_print_guid(&pcb->vm_srv_id); HVSOCK_DBG(HVSOCK_DBG_INFO, "Connect host_srv_id is "); hvsock_print_guid(&pcb->host_srv_id); out: mtx_unlock(&hvs_trans_socks_mtx); if (found_auto_bound_port == true) vmbus_req_tl_connect(&pcb->vm_srv_id, &pcb->host_srv_id); return (error); } int hvs_trans_disconnect(struct socket *so) { struct hvs_pcb *pcb; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_disconnect called\n", __func__); (void) hvs_trans_lock(); pcb = so2hvspcb(so); if (pcb == NULL) { hvs_trans_unlock(); return (EINVAL); } /* If socket is already disconnected, skip this */ if ((so->so_state & SS_ISDISCONNECTED) == 0) soisdisconnecting(so); hvs_trans_unlock(); return (0); } struct hvs_callback_arg { struct uio *uio; struct sockbuf *sb; }; int hvs_trans_soreceive(struct socket *so, struct sockaddr **paddr, 
struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { struct hvs_pcb *pcb = so2hvspcb(so); struct sockbuf *sb; ssize_t orig_resid; uint32_t canread, to_read; int flags, error = 0; struct hvs_callback_arg cbarg; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_soreceive called\n", __func__); if (so->so_type != SOCK_STREAM) return (EINVAL); if (pcb == NULL) return (EINVAL); if (flagsp != NULL) flags = *flagsp &~ MSG_EOR; else flags = 0; if (flags & MSG_PEEK) return (EOPNOTSUPP); /* If no space to copy out anything */ if (uio->uio_resid == 0 || uio->uio_rw != UIO_READ) return (EINVAL); orig_resid = uio->uio_resid; /* Prevent other readers from entering the socket. */ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); if (error) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: soiolock returned error = %d\n", __func__, error); return (error); } sb = &so->so_rcv; SOCKBUF_LOCK(sb); cbarg.uio = uio; cbarg.sb = sb; /* * If the socket is closing, there might still be some data * in rx br to read. However we need to make sure * the channel is still open. */ if ((sb->sb_state & SBS_CANTRCVMORE) && (so->so_state & SS_ISDISCONNECTED)) { /* Other thread already closed the channel */ error = EPIPE; goto out; } while (true) { while (uio->uio_resid > 0 && (canread = hvsock_canread_check(pcb)) > 0) { to_read = MIN(canread, uio->uio_resid); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: to_read = %u, skip = %u\n", __func__, to_read, (unsigned int)(sizeof(struct hvs_pkt_header) + pcb->recv_data_off)); error = vmbus_chan_recv_peek_call(pcb->chan, to_read, sizeof(struct hvs_pkt_header) + pcb->recv_data_off, hvsock_br_callback, (void *)&cbarg); /* * It is possible socket is disconnected becasue * we released lock in hvsock_br_callback. So we * need to check the state to make sure it is not * disconnected. */ if (error || so->so_state & SS_ISDISCONNECTED) { break; } pcb->recv_data_len -= to_read; pcb->recv_data_off += to_read; } if (error) break; /* Abort if socket has reported problems. */ if (so->so_error) { if (so->so_error == ESHUTDOWN && orig_resid > uio->uio_resid) { /* * Although we got a FIN, we also received * some data in this round. Delivery it * to user. */ error = 0; } else { if (so->so_error != ESHUTDOWN) error = so->so_error; } break; } /* Cannot received more. */ if (sb->sb_state & SBS_CANTRCVMORE) break; /* We are done if buffer has been filled */ if (uio->uio_resid == 0) break; if (!(flags & MSG_WAITALL) && orig_resid > uio->uio_resid) break; /* Buffer ring is empty and we shall not block */ if ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO))) { if (orig_resid == uio->uio_resid) { /* We have not read anything */ error = EAGAIN; } HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: non blocked read return, error %d.\n", __func__, error); break; } /* * Wait and block until (more) data comes in. * Note: Drops the sockbuf lock during wait. 
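 * sbwait() returns 0 once the receive buffer is signalled (typically by
 * sorwakeup_locked() from hvsock_chan_cb when the host posts data), or an
 * error such as EINTR/ERESTART if the sleep is interrupted; the loop then
 * re-checks hvsock_canread_check() for newly arrived bytes.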
*/ error = sbwait(so, SO_RCV); if (error) break; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: wake up from sbwait, read available is %u\n", __func__, vmbus_chan_read_available(pcb->chan)); } out: SOCKBUF_UNLOCK(sb); SOCK_IO_RECV_UNLOCK(so); /* We received a FIN in this call */ if (so->so_error == ESHUTDOWN) { if (so->so_snd.sb_state & SBS_CANTSENDMORE) { /* Send has already closed */ soisdisconnecting(so); } else { /* Just close the receive side */ socantrcvmore(so); } } HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: returning error = %d, so_error = %d\n", __func__, error, so->so_error); return (error); } int hvs_trans_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *controlp, int flags, struct thread *td) { struct hvs_pcb *pcb = so2hvspcb(so); struct sockbuf *sb; ssize_t orig_resid; uint32_t canwrite, to_write; int error = 0; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_sosend called, uio_resid = %zd\n", __func__, uio->uio_resid); if (so->so_type != SOCK_STREAM) return (EINVAL); if (pcb == NULL) return (EINVAL); /* If nothing to send */ if (uio->uio_resid == 0 || uio->uio_rw != UIO_WRITE) return (EINVAL); orig_resid = uio->uio_resid; /* Prevent other writers from entering the socket. */ error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags)); if (error) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: soiolock returned error = %d\n", __func__, error); return (error); } sb = &so->so_snd; SOCKBUF_LOCK(sb); if ((sb->sb_state & SBS_CANTSENDMORE) || so->so_error == ESHUTDOWN) { error = EPIPE; goto out; } while (uio->uio_resid > 0) { canwrite = hvsock_canwrite_check(pcb); if (canwrite == 0) { /* We have sent some data */ if (orig_resid > uio->uio_resid) break; /* * We have not sent any data and it is * non-blocking io */ if (so->so_state & SS_NBIO || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) { error = EWOULDBLOCK; break; } else { /* * We are here because there is no space on * send buffer ring. Signal the other side * to read and free more space. * Sleep wait until space available to send * Note: Drops the sockbuf lock during wait. */ error = sbwait(so, SO_SND); if (error) break; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: wake up from sbwait, space avail on " "tx ring is %u\n", __func__, vmbus_chan_write_available(pcb->chan)); continue; } } to_write = MIN(canwrite, uio->uio_resid); to_write = MIN(to_write, HVSOCK_SEND_BUF_SZ); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: canwrite is %u, to_write = %u\n", __func__, canwrite, to_write); error = hvsock_send_data(pcb->chan, uio, to_write, sb); if (error) break; } out: SOCKBUF_UNLOCK(sb); SOCK_IO_SEND_UNLOCK(so); return (error); } int -hvs_trans_peeraddr(struct socket *so, struct sockaddr **nam) +hvs_trans_peeraddr(struct socket *so, struct sockaddr *sa) { struct hvs_pcb *pcb = so2hvspcb(so); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_peeraddr called\n", __func__); if (pcb == NULL) return (EINVAL); - *nam = sodupsockaddr((struct sockaddr *) &pcb->remote_addr, M_NOWAIT); + memcpy(sa, &pcb->remote_addr, pcb->remote_addr.sa_len); - return ((*nam == NULL)? ENOMEM : 0); + return (0); } int -hvs_trans_sockaddr(struct socket *so, struct sockaddr **nam) +hvs_trans_sockaddr(struct socket *so, struct sockaddr *sa) { struct hvs_pcb *pcb = so2hvspcb(so); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_sockaddr called\n", __func__); if (pcb == NULL) return (EINVAL); - *nam = sodupsockaddr((struct sockaddr *) &pcb->local_addr, M_NOWAIT); + memcpy(sa, &pcb->local_addr, pcb->local_addr.sa_len); - return ((*nam == NULL)?
ENOMEM : 0); + return (0); } void hvs_trans_close(struct socket *so) { struct hvs_pcb *pcb; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_close called\n", __func__); (void) hvs_trans_lock(); pcb = so2hvspcb(so); if (!pcb) { hvs_trans_unlock(); return; } if (so->so_state & SS_ISCONNECTED) { /* Send a FIN to peer */ HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: hvs_trans_close sending a FIN to host\n", __func__); (void) hvsock_send_data(pcb->chan, NULL, 0, NULL); } if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) soisdisconnected(so); pcb->chan = NULL; pcb->so = NULL; if (SOLISTENING(so)) { mtx_lock(&hvs_trans_socks_mtx); /* Remove from bound list */ __hvs_remove_socket_from_list(so, HVS_LIST_BOUND); mtx_unlock(&hvs_trans_socks_mtx); } hvs_trans_unlock(); return; } void hvs_trans_abort(struct socket *so) { struct hvs_pcb *pcb = so2hvspcb(so); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_abort called\n", __func__); (void) hvs_trans_lock(); if (pcb == NULL) { hvs_trans_unlock(); return; } if (SOLISTENING(so)) { mtx_lock(&hvs_trans_socks_mtx); /* Remove from bound list */ __hvs_remove_socket_from_list(so, HVS_LIST_BOUND); mtx_unlock(&hvs_trans_socks_mtx); } if (so->so_state & SS_ISCONNECTED) { (void) sodisconnect(so); } hvs_trans_unlock(); return; } int hvs_trans_shutdown(struct socket *so) { struct hvs_pcb *pcb = so2hvspcb(so); struct sockbuf *sb; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: HyperV Socket hvs_trans_shutdown called\n", __func__); if (pcb == NULL) return (EINVAL); /* * Only get called with the shutdown method is SHUT_WR or * SHUT_RDWR. * When the method is SHUT_RD or SHUT_RDWR, the caller * already set the SBS_CANTRCVMORE on receive side socket * buffer. */ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { /* * SHUT_WR only case. * Receive side is still open. Just close * the send side. */ socantsendmore(so); } else { /* SHUT_RDWR case */ if (so->so_state & SS_ISCONNECTED) { /* Send a FIN to peer */ sb = &so->so_snd; SOCKBUF_LOCK(sb); (void) hvsock_send_data(pcb->chan, NULL, 0, sb); SOCKBUF_UNLOCK(sb); soisdisconnecting(so); } } return (0); } /* In the VM, we support Hyper-V Sockets with AF_HYPERV, and the endpoint is * (see struct sockaddr_hvs). * * On the host, Hyper-V Sockets are supported by Winsock AF_HYPERV: * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user- * guide/make-integration-service, and the endpoint is with * the below sockaddr: * * struct SOCKADDR_HV * { * ADDRESS_FAMILY Family; * USHORT Reserved; * GUID VmId; * GUID ServiceId; * }; * Note: VmID is not used by FreeBSD VM and actually it isn't transmitted via * VMBus, because here it's obvious the host and the VM can easily identify * each other. Though the VmID is useful on the host, especially in the case * of Windows container, FreeBSD VM doesn't need it at all. * * To be compatible with similar infrastructure in Linux VMs, we have * to limit the available GUID space of SOCKADDR_HV so that we can create * a mapping between FreeBSD AF_HYPERV port and SOCKADDR_HV Service GUID. * The rule of writing Hyper-V Sockets apps on the host and in FreeBSD VM is: * **************************************************************************** * The only valid Service GUIDs, from the perspectives of both the host and * * FreeBSD VM, that can be connected by the other end, must conform to this * * format: -facb-11e6-bd58-64006a7986d3. 
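 * As an illustration (not part of the rule above), AF_HYPERV port 0x00000809
 * corresponds to Service GUID 00000809-facb-11e6-bd58-64006a7986d3;
 * get_port_by_srv_id()/set_port_by_srv_id() simply alias the first 32 bits
 * of the GUID with the port number.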
* **************************************************************************** * * When we write apps on the host to connect(), the GUID ServiceID is used. * When we write apps in FreeBSD VM to connect(), we only need to specify the * port and the driver will form the GUID and use that to request the host. * * From the perspective of FreeBSD VM, the remote ephemeral port (i.e. the * auto-generated remote port for a connect request initiated by the host's * connect()) is set to HVADDR_PORT_UNKNOWN, which is not realy used on the * FreeBSD guest. */ /* * Older HyperV hosts (vmbus version 'VMBUS_VERSION_WIN10' or before) * restricts HyperV socket ring buffer size to six 4K pages. Newer * HyperV hosts doen't have this limit. */ #define HVS_RINGBUF_RCV_SIZE (PAGE_SIZE * 6) #define HVS_RINGBUF_SND_SIZE (PAGE_SIZE * 6) #define HVS_RINGBUF_MAX_SIZE (PAGE_SIZE * 64) struct hvsock_sc { device_t dev; struct hvs_pcb *pcb; struct vmbus_channel *channel; }; static bool hvsock_chan_readable(struct vmbus_channel *chan) { uint32_t readable = vmbus_chan_read_available(chan); return (readable >= HVSOCK_PKT_LEN(0)); } static void hvsock_chan_cb(struct vmbus_channel *chan, void *context) { struct hvs_pcb *pcb = (struct hvs_pcb *) context; struct socket *so; uint32_t canwrite; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: host send us a wakeup on rb data, pcb = %p\n", __func__, pcb); /* * Check if the socket is still attached and valid. * Here we know channel is still open. Need to make * sure the socket has not been closed or freed. */ (void) hvs_trans_lock(); so = hsvpcb2so(pcb); if (pcb->chan != NULL && so != NULL) { /* * Wake up reader if there are data to read. */ SOCKBUF_LOCK(&(so)->so_rcv); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: read available = %u\n", __func__, vmbus_chan_read_available(pcb->chan)); if (hvsock_chan_readable(pcb->chan)) sorwakeup_locked(so); else SOCKBUF_UNLOCK(&(so)->so_rcv); /* * Wake up sender if space becomes available to write. */ SOCKBUF_LOCK(&(so)->so_snd); canwrite = hvsock_canwrite_check(pcb); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: canwrite = %u\n", __func__, canwrite); if (canwrite > 0) { sowwakeup_locked(so); } else { SOCKBUF_UNLOCK(&(so)->so_snd); } } hvs_trans_unlock(); return; } static int hvsock_br_callback(void *datap, int cplen, void *cbarg) { struct hvs_callback_arg *arg = (struct hvs_callback_arg *)cbarg; struct uio *uio = arg->uio; struct sockbuf *sb = arg->sb; int error = 0; if (cbarg == NULL || datap == NULL) return (EINVAL); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: called, uio_rw = %s, uio_resid = %zd, cplen = %u, " "datap = %p\n", __func__, (uio->uio_rw == UIO_READ) ? 
"read from br":"write to br", uio->uio_resid, cplen, datap); if (sb) SOCKBUF_UNLOCK(sb); error = uiomove(datap, cplen, uio); if (sb) SOCKBUF_LOCK(sb); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: after uiomove, uio_resid = %zd, error = %d\n", __func__, uio->uio_resid, error); return (error); } static int hvsock_send_data(struct vmbus_channel *chan, struct uio *uio, uint32_t to_write, struct sockbuf *sb) { struct hvs_pkt_header hvs_pkt; int hvs_pkthlen, hvs_pktlen, pad_pktlen, hlen, error = 0; uint64_t pad = 0; struct iovec iov[3]; struct hvs_callback_arg cbarg; if (chan == NULL) return (ENOTCONN); hlen = sizeof(struct vmbus_chanpkt_hdr); hvs_pkthlen = sizeof(struct hvs_pkt_header); hvs_pktlen = hvs_pkthlen + to_write; pad_pktlen = VMBUS_CHANPKT_TOTLEN(hvs_pktlen); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: hlen = %u, hvs_pkthlen = %u, hvs_pktlen = %u, " "pad_pktlen = %u, data_len = %u\n", __func__, hlen, hvs_pkthlen, hvs_pktlen, pad_pktlen, to_write); hvs_pkt.chan_pkt_hdr.cph_type = VMBUS_CHANPKT_TYPE_INBAND; hvs_pkt.chan_pkt_hdr.cph_flags = 0; VMBUS_CHANPKT_SETLEN(hvs_pkt.chan_pkt_hdr.cph_hlen, hlen); VMBUS_CHANPKT_SETLEN(hvs_pkt.chan_pkt_hdr.cph_tlen, pad_pktlen); hvs_pkt.chan_pkt_hdr.cph_xactid = 0; hvs_pkt.vmpipe_pkt_hdr.vmpipe_pkt_type = 1; hvs_pkt.vmpipe_pkt_hdr.vmpipe_data_size = to_write; cbarg.uio = uio; cbarg.sb = sb; if (uio && to_write > 0) { iov[0].iov_base = &hvs_pkt; iov[0].iov_len = hvs_pkthlen; iov[1].iov_base = NULL; iov[1].iov_len = to_write; iov[2].iov_base = &pad; iov[2].iov_len = pad_pktlen - hvs_pktlen; error = vmbus_chan_iov_send(chan, iov, 3, hvsock_br_callback, &cbarg); } else { if (to_write == 0) { iov[0].iov_base = &hvs_pkt; iov[0].iov_len = hvs_pkthlen; iov[1].iov_base = &pad; iov[1].iov_len = pad_pktlen - hvs_pktlen; error = vmbus_chan_iov_send(chan, iov, 2, NULL, NULL); } } if (error) { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: error = %d\n", __func__, error); } return (error); } /* * Check if we have data on current ring buffer to read * or not. If not, advance the ring buffer read index to * next packet. Update the recev_data_len and recev_data_off * to new value. * Return the number of bytes can read. */ static uint32_t hvsock_canread_check(struct hvs_pcb *pcb) { uint32_t advance; uint32_t tlen, hlen, dlen; uint32_t bytes_canread = 0; int error; if (pcb == NULL || pcb->chan == NULL) { pcb->so->so_error = EIO; return (0); } /* Still have data not read yet on current packet */ if (pcb->recv_data_len > 0) return (pcb->recv_data_len); if (pcb->rb_init) advance = VMBUS_CHANPKT_GETLEN(pcb->hvs_pkt.chan_pkt_hdr.cph_tlen); else advance = 0; bytes_canread = vmbus_chan_read_available(pcb->chan); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: bytes_canread on br = %u, advance = %u\n", __func__, bytes_canread, advance); if (pcb->rb_init && bytes_canread == (advance + sizeof(uint64_t))) { /* * Nothing to read. Need to advance the rindex before * calling sbwait, so host knows to wake us up when data * is available to read on rb. 
*/ error = vmbus_chan_recv_idxadv(pcb->chan, advance); if (error) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: after calling vmbus_chan_recv_idxadv, " "got error = %d\n", __func__, error); return (0); } else { pcb->rb_init = false; pcb->recv_data_len = 0; pcb->recv_data_off = 0; bytes_canread = vmbus_chan_read_available(pcb->chan); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: advanced %u bytes, " " bytes_canread on br now = %u\n", __func__, advance, bytes_canread); if (bytes_canread == 0) return (0); else advance = 0; } } if (bytes_canread < advance + (sizeof(struct hvs_pkt_header) + sizeof(uint64_t))) return (0); error = vmbus_chan_recv_peek(pcb->chan, &pcb->hvs_pkt, sizeof(struct hvs_pkt_header), advance); /* Don't have anything to read */ if (error) { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: after calling vmbus_chan_recv_peek, got error = %d\n", __func__, error); return (0); } /* * We just read in a new packet header. Do some sanity checks. */ tlen = VMBUS_CHANPKT_GETLEN(pcb->hvs_pkt.chan_pkt_hdr.cph_tlen); hlen = VMBUS_CHANPKT_GETLEN(pcb->hvs_pkt.chan_pkt_hdr.cph_hlen); dlen = pcb->hvs_pkt.vmpipe_pkt_hdr.vmpipe_data_size; if (__predict_false(hlen < sizeof(struct vmbus_chanpkt_hdr)) || __predict_false(hlen > tlen) || __predict_false(tlen < dlen + sizeof(struct hvs_pkt_header))) { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "invalid tlen(%u), hlen(%u) or dlen(%u)\n", tlen, hlen, dlen); pcb->so->so_error = EIO; return (0); } if (pcb->rb_init == false) pcb->rb_init = true; HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "Got new pkt tlen(%u), hlen(%u) or dlen(%u)\n", tlen, hlen, dlen); /* The other side has sent a close FIN */ if (dlen == 0) { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: Received FIN from other side\n", __func__); /* inform the caller by seting so_error to ESHUTDOWN */ pcb->so->so_error = ESHUTDOWN; } HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: canread on receive ring is %u \n", __func__, dlen); pcb->recv_data_len = dlen; pcb->recv_data_off = 0; return (pcb->recv_data_len); } static uint32_t hvsock_canwrite_check(struct hvs_pcb *pcb) { uint32_t writeable; uint32_t ret; if (pcb == NULL || pcb->chan == NULL) return (0); writeable = vmbus_chan_write_available(pcb->chan); /* * We must always reserve a 0-length-payload packet for the FIN. */ HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: writeable is %u, should be greater than %ju\n", __func__, writeable, (uintmax_t)(HVSOCK_PKT_LEN(1) + HVSOCK_PKT_LEN(0))); if (writeable < HVSOCK_PKT_LEN(1) + HVSOCK_PKT_LEN(0)) { /* * The Tx ring seems full. */ return (0); } ret = writeable - HVSOCK_PKT_LEN(0) - HVSOCK_PKT_LEN(0); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: available size is %u\n", __func__, rounddown2(ret, 8)); return (rounddown2(ret, 8)); } static void hvsock_set_chan_pending_send_size(struct vmbus_channel *chan) { vmbus_chan_set_pending_send_size(chan, HVSOCK_PKT_LEN(HVSOCK_SEND_BUF_SZ)); } static int hvsock_open_channel(struct vmbus_channel *chan, struct socket *so) { unsigned int rcvbuf, sndbuf; struct hvs_pcb *pcb = so2hvspcb(so); int ret; if (vmbus_current_version < VMBUS_VERSION_WIN10_V5) { sndbuf = HVS_RINGBUF_SND_SIZE; rcvbuf = HVS_RINGBUF_RCV_SIZE; } else { sndbuf = MAX(so->so_snd.sb_hiwat, HVS_RINGBUF_SND_SIZE); sndbuf = MIN(sndbuf, HVS_RINGBUF_MAX_SIZE); sndbuf = rounddown2(sndbuf, PAGE_SIZE); rcvbuf = MAX(so->so_rcv.sb_hiwat, HVS_RINGBUF_RCV_SIZE); rcvbuf = MIN(rcvbuf, HVS_RINGBUF_MAX_SIZE); rcvbuf = rounddown2(rcvbuf, PAGE_SIZE); } /* * Can only read whatever user provided size of data * from ring buffer. Turn off batched reading. 
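 * Turning batching off appears to leave the read index entirely under this driver's control, which matches how hvsock_canread_check() peeks packet headers with vmbus_chan_recv_peek() and only advances explicitly with vmbus_chan_recv_idxadv().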
*/ vmbus_chan_set_readbatch(chan, false); ret = vmbus_chan_open(chan, sndbuf, rcvbuf, NULL, 0, hvsock_chan_cb, pcb); if (ret != 0) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: failed to open hvsock channel, sndbuf = %u, " "rcvbuf = %u\n", __func__, sndbuf, rcvbuf); } else { HVSOCK_DBG(HVSOCK_DBG_INFO, "%s: hvsock channel opened, sndbuf = %u, " "rcvbuf = %u\n", __func__, sndbuf, rcvbuf); /* * Set the pending send size so as to receive wakeup * signals from host when there is enough space on * rx buffer ring to write. */ hvsock_set_chan_pending_send_size(chan); } return (ret); } /* * Guest is listening passively on the socket. Open channel and * create a new socket for the connection. */ static void hvsock_open_conn_passive(struct vmbus_channel *chan, struct socket *so, struct hvsock_sc *sc) { struct socket *new_so; struct hvs_pcb *new_pcb, *pcb; int error; /* Do nothing if socket is not listening */ if (!SOLISTENING(so)) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: socket is not a listening one\n", __func__); return; } /* * Create a new socket. This will call pru_attach to complete * the socket initialization and put the new socket onto * listening socket's sol_incomp list, waiting to be promoted * to sol_comp list. * The new socket created has ref count 0. There is no other * thread that changes the state of this new one at the * moment, so we don't need to hold its lock while opening * channel and filling out its pcb information. */ new_so = sonewconn(so, 0); if (!new_so) HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: creating new socket failed\n", __func__); /* * Now open the vmbus channel. If it fails, the socket will be * on the listening socket's sol_incomp queue until it is * replaced and aborted. */ error = hvsock_open_channel(chan, new_so); if (error) { new_so->so_error = error; return; } pcb = so->so_pcb; new_pcb = new_so->so_pcb; hvs_addr_set(&(new_pcb->local_addr), pcb->local_addr.hvs_port); /* Remote port is unknown to guest in this type of connection */ hvs_addr_set(&(new_pcb->remote_addr), HVADDR_PORT_UNKNOWN); new_pcb->chan = chan; new_pcb->recv_data_len = 0; new_pcb->recv_data_off = 0; new_pcb->rb_init = false; new_pcb->vm_srv_id = *vmbus_chan_guid_type(chan); new_pcb->host_srv_id = *vmbus_chan_guid_inst(chan); hvs_insert_socket_on_list(new_so, HVS_LIST_CONNECTED); sc->pcb = new_pcb; /* * Change the socket state to SS_ISCONNECTED. This will promote * the socket to sol_comp queue and wake up the thread which * is accepting connection. */ soisconnected(new_so); } /* * Guest is actively connecting to host. */ static void hvsock_open_conn_active(struct vmbus_channel *chan, struct socket *so) { struct hvs_pcb *pcb; int error; error = hvsock_open_channel(chan, so); if (error) { so->so_error = error; return; } pcb = so->so_pcb; pcb->chan = chan; pcb->recv_data_len = 0; pcb->recv_data_off = 0; pcb->rb_init = false; mtx_lock(&hvs_trans_socks_mtx); __hvs_remove_socket_from_list(so, HVS_LIST_BOUND); __hvs_insert_socket_on_list(so, HVS_LIST_CONNECTED); mtx_unlock(&hvs_trans_socks_mtx); /* * Change the socket state to SS_ISCONNECTED. This will wake up * the thread sleeping in connect call.
*/ soisconnected(so); } static void hvsock_open_connection(struct vmbus_channel *chan, struct hvsock_sc *sc) { struct hyperv_guid *inst_guid, *type_guid; bool conn_from_host; struct sockaddr_hvs addr; struct socket *so; struct hvs_pcb *pcb; type_guid = (struct hyperv_guid *) vmbus_chan_guid_type(chan); inst_guid = (struct hyperv_guid *) vmbus_chan_guid_inst(chan); conn_from_host = vmbus_chan_is_hvs_conn_from_host(chan); HVSOCK_DBG(HVSOCK_DBG_INFO, "type_guid is "); hvsock_print_guid(type_guid); HVSOCK_DBG(HVSOCK_DBG_INFO, "inst_guid is "); hvsock_print_guid(inst_guid); HVSOCK_DBG(HVSOCK_DBG_INFO, "connection %s host\n", (conn_from_host == true ) ? "from" : "to"); /* * The listening port should be in [0, MAX_LISTEN_PORT] */ if (!is_valid_srv_id(type_guid)) return; /* * There should be a bound socket already created no matter * it is a passive or active connection. * For host initiated connection (passive on guest side), * the type_guid contains the port which guest is bound and * listening. * For the guest initiated connection (active on guest side), * the inst_guid contains the port that guest has auto bound * to. */ hvs_addr_init(&addr, conn_from_host ? type_guid : inst_guid); so = hvs_find_socket_on_list(&addr, HVS_LIST_BOUND); if (!so) { HVSOCK_DBG(HVSOCK_DBG_ERR, "%s: no bound socket found for port %u\n", __func__, addr.hvs_port); return; } if (conn_from_host) { hvsock_open_conn_passive(chan, so, sc); } else { (void) hvs_trans_lock(); pcb = so->so_pcb; if (pcb && pcb->so) { sc->pcb = so2hvspcb(so); hvsock_open_conn_active(chan, so); } else { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "%s: channel detached before open\n", __func__); } hvs_trans_unlock(); } } static int hvsock_probe(device_t dev) { struct vmbus_channel *channel = vmbus_get_channel(dev); if (!channel || !vmbus_chan_is_hvs(channel)) { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "hvsock_probe called but not a hvsock channel id %u\n", vmbus_chan_id(channel)); return ENXIO; } else { HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "hvsock_probe got a hvsock channel id %u\n", vmbus_chan_id(channel)); return BUS_PROBE_DEFAULT; } } static int hvsock_attach(device_t dev) { struct vmbus_channel *channel = vmbus_get_channel(dev); struct hvsock_sc *sc = (struct hvsock_sc *)device_get_softc(dev); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "hvsock_attach called.\n"); hvsock_open_connection(channel, sc); /* * Always return success. On error the host will rescind the device * in 30 seconds and we can do cleanup at that time in * vmbus_chan_msgproc_chrescind(). */ return (0); } static int hvsock_detach(device_t dev) { struct hvsock_sc *sc = (struct hvsock_sc *)device_get_softc(dev); struct socket *so; int retry; if (bootverbose) device_printf(dev, "hvsock_detach called.\n"); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "hvsock_detach called.\n"); if (sc->pcb != NULL) { (void) hvs_trans_lock(); so = hsvpcb2so(sc->pcb); if (so) { /* Close the connection */ if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) soisdisconnected(so); } mtx_lock(&hvs_trans_socks_mtx); __hvs_remove_pcb_from_list(sc->pcb, HVS_LIST_BOUND | HVS_LIST_CONNECTED); mtx_unlock(&hvs_trans_socks_mtx); /* * Close channel while no reader and sender are working * on the buffer rings. 
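 * The loops below keep calling soisdisconnected() while polling SOCK_IO_RECV_LOCK()/SOCK_IO_SEND_LOCK(), so a thread blocked in soreceive()/sosend() is woken, observes the disconnect and releases the I/O lock before the pcb is freed.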
*/ if (so) { retry = 0; while (SOCK_IO_RECV_LOCK(so, 0) == EWOULDBLOCK) { /* * Someone is reading, rx br is busy */ soisdisconnected(so); DELAY(500); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "waiting for rx reader to exit, " "retry = %d\n", retry++); } retry = 0; while (SOCK_IO_SEND_LOCK(so, 0) == EWOULDBLOCK) { /* * Someone is sending, tx br is busy */ soisdisconnected(so); DELAY(500); HVSOCK_DBG(HVSOCK_DBG_VERBOSE, "waiting for tx sender to exit, " "retry = %d\n", retry++); } } bzero(sc->pcb, sizeof(struct hvs_pcb)); free(sc->pcb, M_HVSOCK); sc->pcb = NULL; if (so) { SOCK_IO_RECV_UNLOCK(so); SOCK_IO_SEND_UNLOCK(so); so->so_pcb = NULL; } hvs_trans_unlock(); } vmbus_chan_close(vmbus_get_channel(dev)); return (0); } static device_method_t hvsock_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hvsock_probe), DEVMETHOD(device_attach, hvsock_attach), DEVMETHOD(device_detach, hvsock_detach), DEVMETHOD_END }; static driver_t hvsock_driver = { "hv_sock", hvsock_methods, sizeof(struct hvsock_sc) }; DRIVER_MODULE(hvsock, vmbus, hvsock_driver, NULL, NULL); MODULE_VERSION(hvsock, 1); MODULE_DEPEND(hvsock, vmbus, 1, 1, 1); diff --git a/sys/dev/hyperv/hvsock/hv_sock.h b/sys/dev/hyperv/hvsock/hv_sock.h index ee6416a29662..e11621d76dbc 100644 --- a/sys/dev/hyperv/hvsock/hv_sock.h +++ b/sys/dev/hyperv/hvsock/hv_sock.h @@ -1,119 +1,119 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _HVSOCK_H #define _HVSOCK_H #include #include #include #include #include /* * HyperV Socket Protocols */ #define HYPERV_SOCK_PROTO_TRANS 1 /* Transport protocol */ #define HVADDR_PORT_ANY -1U #define HVADDR_PORT_UNKNOWN -1U #define HVS_LIST_BOUND 0x01 #define HVS_LIST_CONNECTED 0x02 #define HVS_LIST_ALL (HVS_LIST_BOUND | HVS_LIST_CONNECTED) struct sockaddr_hvs { unsigned char sa_len; sa_family_t sa_family; unsigned int hvs_port; unsigned char hvs_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - sizeof(unsigned char) - sizeof(unsigned int)]; }; struct vmpipe_proto_header { uint32_t vmpipe_pkt_type; uint32_t vmpipe_data_size; } __packed; struct hvs_pkt_header { struct vmbus_chanpkt_hdr chan_pkt_hdr; struct vmpipe_proto_header vmpipe_pkt_hdr; } __packed; struct hvs_pcb { struct socket *so; /* Pointer to socket */ struct sockaddr_hvs local_addr; struct sockaddr_hvs remote_addr; struct hyperv_guid vm_srv_id; struct hyperv_guid host_srv_id; struct vmbus_channel *chan; /* Current packet header on rx ring */ struct hvs_pkt_header hvs_pkt; /* Available data in receive br in current packet */ uint32_t recv_data_len; /* offset in the packet */ uint32_t recv_data_off; bool rb_init; /* Link lists for global bound and connected sockets */ LIST_ENTRY(hvs_pcb) bound_next; LIST_ENTRY(hvs_pcb) connected_next; }; #define so2hvspcb(so) \ ((struct hvs_pcb *)((so)->so_pcb)) #define hsvpcb2so(hvspcb) \ ((struct socket *)((hvspcb)->so)) void hvs_addr_init(struct sockaddr_hvs *, const struct hyperv_guid *); void hvs_trans_close(struct socket *); void hvs_trans_detach(struct socket *); void hvs_trans_abort(struct socket *); int hvs_trans_attach(struct socket *, int, struct thread *); int hvs_trans_bind(struct socket *, struct sockaddr *, struct thread *); int hvs_trans_listen(struct socket *, int, struct thread *); int hvs_trans_accept(struct socket *, struct sockaddr *); int hvs_trans_connect(struct socket *, struct sockaddr *, struct thread *); -int hvs_trans_peeraddr(struct socket *, struct sockaddr **); -int hvs_trans_sockaddr(struct socket *, struct sockaddr **); +int hvs_trans_peeraddr(struct socket *, struct sockaddr *); +int hvs_trans_sockaddr(struct socket *, struct sockaddr *); int hvs_trans_soreceive(struct socket *, struct sockaddr **, struct uio *, struct mbuf **, struct mbuf **, int *); int hvs_trans_sosend(struct socket *, struct sockaddr *, struct uio *, struct mbuf *, struct mbuf *, int, struct thread *); int hvs_trans_disconnect(struct socket *); int hvs_trans_shutdown(struct socket *); int hvs_trans_lock(void); void hvs_trans_unlock(void); void hvs_remove_socket_from_list(struct socket *, unsigned char); #endif /* _HVSOCK_H */ diff --git a/sys/dev/wg/if_wg.c b/sys/dev/wg/if_wg.c index 0ea40b763416..71c80c8b5f77 100644 --- a/sys/dev/wg/if_wg.c +++ b/sys/dev/wg/if_wg.c @@ -1,3056 +1,3058 @@ /* SPDX-License-Identifier: ISC * * Copyright (C) 2015-2021 Jason A. Donenfeld . All Rights Reserved. 
* Copyright (C) 2019-2021 Matt Dunwoodie * Copyright (c) 2019-2020 Rubicon Communications, LLC (Netgate) * Copyright (c) 2021 Kyle Evans * Copyright (c) 2022 The FreeBSD Foundation */ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "wg_noise.h" #include "wg_cookie.h" #include "version.h" #include "if_wg.h" #define DEFAULT_MTU (ETHERMTU - 80) #define MAX_MTU (IF_MAXMTU - 80) #define MAX_STAGED_PKT 128 #define MAX_QUEUED_PKT 1024 #define MAX_QUEUED_PKT_MASK (MAX_QUEUED_PKT - 1) #define MAX_QUEUED_HANDSHAKES 4096 #define REKEY_TIMEOUT_JITTER 334 /* 1/3 sec, round for arc4random_uniform */ #define MAX_TIMER_HANDSHAKES (90 / REKEY_TIMEOUT) #define NEW_HANDSHAKE_TIMEOUT (REKEY_TIMEOUT + KEEPALIVE_TIMEOUT) #define UNDERLOAD_TIMEOUT 1 #define DPRINTF(sc, ...) if (if_getflags(sc->sc_ifp) & IFF_DEBUG) if_printf(sc->sc_ifp, ##__VA_ARGS__) /* First byte indicating packet type on the wire */ #define WG_PKT_INITIATION htole32(1) #define WG_PKT_RESPONSE htole32(2) #define WG_PKT_COOKIE htole32(3) #define WG_PKT_DATA htole32(4) #define WG_PKT_PADDING 16 #define WG_KEY_SIZE 32 struct wg_pkt_initiation { uint32_t t; uint32_t s_idx; uint8_t ue[NOISE_PUBLIC_KEY_LEN]; uint8_t es[NOISE_PUBLIC_KEY_LEN + NOISE_AUTHTAG_LEN]; uint8_t ets[NOISE_TIMESTAMP_LEN + NOISE_AUTHTAG_LEN]; struct cookie_macs m; }; struct wg_pkt_response { uint32_t t; uint32_t s_idx; uint32_t r_idx; uint8_t ue[NOISE_PUBLIC_KEY_LEN]; uint8_t en[0 + NOISE_AUTHTAG_LEN]; struct cookie_macs m; }; struct wg_pkt_cookie { uint32_t t; uint32_t r_idx; uint8_t nonce[COOKIE_NONCE_SIZE]; uint8_t ec[COOKIE_ENCRYPTED_SIZE]; }; struct wg_pkt_data { uint32_t t; uint32_t r_idx; uint64_t nonce; uint8_t buf[]; }; struct wg_endpoint { union { struct sockaddr r_sa; struct sockaddr_in r_sin; #ifdef INET6 struct sockaddr_in6 r_sin6; #endif } e_remote; union { struct in_addr l_in; #ifdef INET6 struct in6_pktinfo l_pktinfo6; #define l_in6 l_pktinfo6.ipi6_addr #endif } e_local; }; struct aip_addr { uint8_t length; union { uint8_t bytes[16]; uint32_t ip; uint32_t ip6[4]; struct in_addr in; struct in6_addr in6; }; }; struct wg_aip { struct radix_node a_nodes[2]; LIST_ENTRY(wg_aip) a_entry; struct aip_addr a_addr; struct aip_addr a_mask; struct wg_peer *a_peer; sa_family_t a_af; }; struct wg_packet { STAILQ_ENTRY(wg_packet) p_serial; STAILQ_ENTRY(wg_packet) p_parallel; struct wg_endpoint p_endpoint; struct noise_keypair *p_keypair; uint64_t p_nonce; struct mbuf *p_mbuf; int p_mtu; sa_family_t p_af; enum wg_ring_state { WG_PACKET_UNCRYPTED, WG_PACKET_CRYPTED, WG_PACKET_DEAD, } p_state; }; STAILQ_HEAD(wg_packet_list, wg_packet); struct wg_queue { struct mtx q_mtx; struct wg_packet_list q_queue; size_t q_len; }; struct wg_peer { TAILQ_ENTRY(wg_peer) p_entry; uint64_t p_id; struct wg_softc *p_sc; struct noise_remote *p_remote; struct cookie_maker p_cookie; struct rwlock p_endpoint_lock; struct wg_endpoint p_endpoint; struct wg_queue p_stage_queue; struct wg_queue p_encrypt_serial; struct wg_queue p_decrypt_serial; bool p_enabled; bool p_need_another_keepalive; uint16_t p_persistent_keepalive_interval; struct callout p_new_handshake; struct callout p_send_keepalive; struct callout p_retry_handshake; struct callout p_zero_key_material; struct 
callout p_persistent_keepalive; struct mtx p_handshake_mtx; struct timespec p_handshake_complete; /* nanotime */ int p_handshake_retries; struct grouptask p_send; struct grouptask p_recv; counter_u64_t p_tx_bytes; counter_u64_t p_rx_bytes; LIST_HEAD(, wg_aip) p_aips; size_t p_aips_num; }; struct wg_socket { struct socket *so_so4; struct socket *so_so6; uint32_t so_user_cookie; int so_fibnum; in_port_t so_port; }; struct wg_softc { LIST_ENTRY(wg_softc) sc_entry; if_t sc_ifp; int sc_flags; struct ucred *sc_ucred; struct wg_socket sc_socket; TAILQ_HEAD(,wg_peer) sc_peers; size_t sc_peers_num; struct noise_local *sc_local; struct cookie_checker sc_cookie; struct radix_node_head *sc_aip4; struct radix_node_head *sc_aip6; struct grouptask sc_handshake; struct wg_queue sc_handshake_queue; struct grouptask *sc_encrypt; struct grouptask *sc_decrypt; struct wg_queue sc_encrypt_parallel; struct wg_queue sc_decrypt_parallel; u_int sc_encrypt_last_cpu; u_int sc_decrypt_last_cpu; struct sx sc_lock; }; #define WGF_DYING 0x0001 #define MAX_LOOPS 8 #define MTAG_WGLOOP 0x77676c70 /* wglp */ #ifndef ENOKEY #define ENOKEY ENOTCAPABLE #endif #define GROUPTASK_DRAIN(gtask) \ gtaskqueue_drain((gtask)->gt_taskqueue, &(gtask)->gt_task) #define BPF_MTAP2_AF(ifp, m, af) do { \ uint32_t __bpf_tap_af = (af); \ BPF_MTAP2(ifp, &__bpf_tap_af, sizeof(__bpf_tap_af), m); \ } while (0) static int clone_count; static uma_zone_t wg_packet_zone; static volatile unsigned long peer_counter = 0; static const char wgname[] = "wg"; static unsigned wg_osd_jail_slot; static struct sx wg_sx; SX_SYSINIT(wg_sx, &wg_sx, "wg_sx"); static LIST_HEAD(, wg_softc) wg_list = LIST_HEAD_INITIALIZER(wg_list); static TASKQGROUP_DEFINE(wg_tqg, mp_ncpus, 1); MALLOC_DEFINE(M_WG, "WG", "wireguard"); VNET_DEFINE_STATIC(struct if_clone *, wg_cloner); #define V_wg_cloner VNET(wg_cloner) #define WG_CAPS IFCAP_LINKSTATE struct wg_timespec64 { uint64_t tv_sec; uint64_t tv_nsec; }; static int wg_socket_init(struct wg_softc *, in_port_t); static int wg_socket_bind(struct socket **, struct socket **, in_port_t *); static void wg_socket_set(struct wg_softc *, struct socket *, struct socket *); static void wg_socket_uninit(struct wg_softc *); static int wg_socket_set_sockopt(struct socket *, struct socket *, int, void *, size_t); static int wg_socket_set_cookie(struct wg_softc *, uint32_t); static int wg_socket_set_fibnum(struct wg_softc *, int); static int wg_send(struct wg_softc *, struct wg_endpoint *, struct mbuf *); static void wg_timers_enable(struct wg_peer *); static void wg_timers_disable(struct wg_peer *); static void wg_timers_set_persistent_keepalive(struct wg_peer *, uint16_t); static void wg_timers_get_last_handshake(struct wg_peer *, struct wg_timespec64 *); static void wg_timers_event_data_sent(struct wg_peer *); static void wg_timers_event_data_received(struct wg_peer *); static void wg_timers_event_any_authenticated_packet_sent(struct wg_peer *); static void wg_timers_event_any_authenticated_packet_received(struct wg_peer *); static void wg_timers_event_any_authenticated_packet_traversal(struct wg_peer *); static void wg_timers_event_handshake_initiated(struct wg_peer *); static void wg_timers_event_handshake_complete(struct wg_peer *); static void wg_timers_event_session_derived(struct wg_peer *); static void wg_timers_event_want_initiation(struct wg_peer *); static void wg_timers_run_send_initiation(struct wg_peer *, bool); static void wg_timers_run_retry_handshake(void *); static void wg_timers_run_send_keepalive(void *); static void 
wg_timers_run_new_handshake(void *); static void wg_timers_run_zero_key_material(void *); static void wg_timers_run_persistent_keepalive(void *); static int wg_aip_add(struct wg_softc *, struct wg_peer *, sa_family_t, const void *, uint8_t); static struct wg_peer *wg_aip_lookup(struct wg_softc *, sa_family_t, void *); static void wg_aip_remove_all(struct wg_softc *, struct wg_peer *); static struct wg_peer *wg_peer_alloc(struct wg_softc *, const uint8_t [WG_KEY_SIZE]); static void wg_peer_free_deferred(struct noise_remote *); static void wg_peer_destroy(struct wg_peer *); static void wg_peer_destroy_all(struct wg_softc *); static void wg_peer_send_buf(struct wg_peer *, uint8_t *, size_t); static void wg_send_initiation(struct wg_peer *); static void wg_send_response(struct wg_peer *); static void wg_send_cookie(struct wg_softc *, struct cookie_macs *, uint32_t, struct wg_endpoint *); static void wg_peer_set_endpoint(struct wg_peer *, struct wg_endpoint *); static void wg_peer_clear_src(struct wg_peer *); static void wg_peer_get_endpoint(struct wg_peer *, struct wg_endpoint *); static void wg_send_buf(struct wg_softc *, struct wg_endpoint *, uint8_t *, size_t); static void wg_send_keepalive(struct wg_peer *); static void wg_handshake(struct wg_softc *, struct wg_packet *); static void wg_encrypt(struct wg_softc *, struct wg_packet *); static void wg_decrypt(struct wg_softc *, struct wg_packet *); static void wg_softc_handshake_receive(struct wg_softc *); static void wg_softc_decrypt(struct wg_softc *); static void wg_softc_encrypt(struct wg_softc *); static void wg_encrypt_dispatch(struct wg_softc *); static void wg_decrypt_dispatch(struct wg_softc *); static void wg_deliver_out(struct wg_peer *); static void wg_deliver_in(struct wg_peer *); static struct wg_packet *wg_packet_alloc(struct mbuf *); static void wg_packet_free(struct wg_packet *); static void wg_queue_init(struct wg_queue *, const char *); static void wg_queue_deinit(struct wg_queue *); static size_t wg_queue_len(struct wg_queue *); static int wg_queue_enqueue_handshake(struct wg_queue *, struct wg_packet *); static struct wg_packet *wg_queue_dequeue_handshake(struct wg_queue *); static void wg_queue_push_staged(struct wg_queue *, struct wg_packet *); static void wg_queue_enlist_staged(struct wg_queue *, struct wg_packet_list *); static void wg_queue_delist_staged(struct wg_queue *, struct wg_packet_list *); static void wg_queue_purge(struct wg_queue *); static int wg_queue_both(struct wg_queue *, struct wg_queue *, struct wg_packet *); static struct wg_packet *wg_queue_dequeue_serial(struct wg_queue *); static struct wg_packet *wg_queue_dequeue_parallel(struct wg_queue *); static bool wg_input(struct mbuf *, int, struct inpcb *, const struct sockaddr *, void *); static void wg_peer_send_staged(struct wg_peer *); static int wg_clone_create(struct if_clone *ifc, char *name, size_t len, struct ifc_data *ifd, if_t *ifpp); static void wg_qflush(if_t); static inline int determine_af_and_pullup(struct mbuf **m, sa_family_t *af); static int wg_xmit(if_t, struct mbuf *, sa_family_t, uint32_t); static int wg_transmit(if_t, struct mbuf *); static int wg_output(if_t, struct mbuf *, const struct sockaddr *, struct route *); static int wg_clone_destroy(struct if_clone *ifc, if_t ifp, uint32_t flags); static bool wgc_privileged(struct wg_softc *); static int wgc_get(struct wg_softc *, struct wg_data_io *); static int wgc_set(struct wg_softc *, struct wg_data_io *); static int wg_up(struct wg_softc *); static void wg_down(struct wg_softc *); 
static void wg_reassign(if_t, struct vnet *, char *unused); static void wg_init(void *); static int wg_ioctl(if_t, u_long, caddr_t); static void vnet_wg_init(const void *); static void vnet_wg_uninit(const void *); static int wg_module_init(void); static void wg_module_deinit(void); /* TODO Peer */ static struct wg_peer * wg_peer_alloc(struct wg_softc *sc, const uint8_t pub_key[WG_KEY_SIZE]) { struct wg_peer *peer; sx_assert(&sc->sc_lock, SX_XLOCKED); peer = malloc(sizeof(*peer), M_WG, M_WAITOK | M_ZERO); peer->p_remote = noise_remote_alloc(sc->sc_local, peer, pub_key); peer->p_tx_bytes = counter_u64_alloc(M_WAITOK); peer->p_rx_bytes = counter_u64_alloc(M_WAITOK); peer->p_id = peer_counter++; peer->p_sc = sc; cookie_maker_init(&peer->p_cookie, pub_key); rw_init(&peer->p_endpoint_lock, "wg_peer_endpoint"); wg_queue_init(&peer->p_stage_queue, "stageq"); wg_queue_init(&peer->p_encrypt_serial, "txq"); wg_queue_init(&peer->p_decrypt_serial, "rxq"); peer->p_enabled = false; peer->p_need_another_keepalive = false; peer->p_persistent_keepalive_interval = 0; callout_init(&peer->p_new_handshake, true); callout_init(&peer->p_send_keepalive, true); callout_init(&peer->p_retry_handshake, true); callout_init(&peer->p_persistent_keepalive, true); callout_init(&peer->p_zero_key_material, true); mtx_init(&peer->p_handshake_mtx, "peer handshake", NULL, MTX_DEF); bzero(&peer->p_handshake_complete, sizeof(peer->p_handshake_complete)); peer->p_handshake_retries = 0; GROUPTASK_INIT(&peer->p_send, 0, (gtask_fn_t *)wg_deliver_out, peer); taskqgroup_attach(qgroup_wg_tqg, &peer->p_send, peer, NULL, NULL, "wg send"); GROUPTASK_INIT(&peer->p_recv, 0, (gtask_fn_t *)wg_deliver_in, peer); taskqgroup_attach(qgroup_wg_tqg, &peer->p_recv, peer, NULL, NULL, "wg recv"); LIST_INIT(&peer->p_aips); peer->p_aips_num = 0; return (peer); } static void wg_peer_free_deferred(struct noise_remote *r) { struct wg_peer *peer = noise_remote_arg(r); /* While there are no references remaining, we may still have * p_{send,recv} executing (think empty queue, but wg_deliver_{in,out} * needs to check the queue. We should wait for them and then free. */ GROUPTASK_DRAIN(&peer->p_recv); GROUPTASK_DRAIN(&peer->p_send); taskqgroup_detach(qgroup_wg_tqg, &peer->p_recv); taskqgroup_detach(qgroup_wg_tqg, &peer->p_send); wg_queue_deinit(&peer->p_decrypt_serial); wg_queue_deinit(&peer->p_encrypt_serial); wg_queue_deinit(&peer->p_stage_queue); counter_u64_free(peer->p_tx_bytes); counter_u64_free(peer->p_rx_bytes); rw_destroy(&peer->p_endpoint_lock); mtx_destroy(&peer->p_handshake_mtx); cookie_maker_free(&peer->p_cookie); free(peer, M_WG); } static void wg_peer_destroy(struct wg_peer *peer) { struct wg_softc *sc = peer->p_sc; sx_assert(&sc->sc_lock, SX_XLOCKED); /* Disable remote and timers. This will prevent any new handshakes * occuring. */ noise_remote_disable(peer->p_remote); wg_timers_disable(peer); /* Now we can remove all allowed IPs so no more packets will be routed * to the peer. */ wg_aip_remove_all(sc, peer); /* Remove peer from the interface, then free. Some references may still * exist to p_remote, so noise_remote_free will wait until they're all * put to call wg_peer_free_deferred. 
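 * wg_peer_free_deferred() above drains and detaches the p_send/p_recv group tasks and tears down the peer's queues, locks and counters, so the final free must go through that deferred path rather than happen here.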
*/ sc->sc_peers_num--; TAILQ_REMOVE(&sc->sc_peers, peer, p_entry); DPRINTF(sc, "Peer %" PRIu64 " destroyed\n", peer->p_id); noise_remote_free(peer->p_remote, wg_peer_free_deferred); } static void wg_peer_destroy_all(struct wg_softc *sc) { struct wg_peer *peer, *tpeer; TAILQ_FOREACH_SAFE(peer, &sc->sc_peers, p_entry, tpeer) wg_peer_destroy(peer); } static void wg_peer_set_endpoint(struct wg_peer *peer, struct wg_endpoint *e) { MPASS(e->e_remote.r_sa.sa_family != 0); if (memcmp(e, &peer->p_endpoint, sizeof(*e)) == 0) return; rw_wlock(&peer->p_endpoint_lock); peer->p_endpoint = *e; rw_wunlock(&peer->p_endpoint_lock); } static void wg_peer_clear_src(struct wg_peer *peer) { rw_wlock(&peer->p_endpoint_lock); bzero(&peer->p_endpoint.e_local, sizeof(peer->p_endpoint.e_local)); rw_wunlock(&peer->p_endpoint_lock); } static void wg_peer_get_endpoint(struct wg_peer *peer, struct wg_endpoint *e) { rw_rlock(&peer->p_endpoint_lock); *e = peer->p_endpoint; rw_runlock(&peer->p_endpoint_lock); } /* Allowed IP */ static int wg_aip_add(struct wg_softc *sc, struct wg_peer *peer, sa_family_t af, const void *addr, uint8_t cidr) { struct radix_node_head *root; struct radix_node *node; struct wg_aip *aip; int ret = 0; aip = malloc(sizeof(*aip), M_WG, M_WAITOK | M_ZERO); aip->a_peer = peer; aip->a_af = af; switch (af) { #ifdef INET case AF_INET: if (cidr > 32) cidr = 32; root = sc->sc_aip4; aip->a_addr.in = *(const struct in_addr *)addr; aip->a_mask.ip = htonl(~((1LL << (32 - cidr)) - 1) & 0xffffffff); aip->a_addr.ip &= aip->a_mask.ip; aip->a_addr.length = aip->a_mask.length = offsetof(struct aip_addr, in) + sizeof(struct in_addr); break; #endif #ifdef INET6 case AF_INET6: if (cidr > 128) cidr = 128; root = sc->sc_aip6; aip->a_addr.in6 = *(const struct in6_addr *)addr; in6_prefixlen2mask(&aip->a_mask.in6, cidr); for (int i = 0; i < 4; i++) aip->a_addr.ip6[i] &= aip->a_mask.ip6[i]; aip->a_addr.length = aip->a_mask.length = offsetof(struct aip_addr, in6) + sizeof(struct in6_addr); break; #endif default: free(aip, M_WG); return (EAFNOSUPPORT); } RADIX_NODE_HEAD_LOCK(root); node = root->rnh_addaddr(&aip->a_addr, &aip->a_mask, &root->rh, aip->a_nodes); if (node == aip->a_nodes) { LIST_INSERT_HEAD(&peer->p_aips, aip, a_entry); peer->p_aips_num++; } else if (!node) node = root->rnh_lookup(&aip->a_addr, &aip->a_mask, &root->rh); if (!node) { free(aip, M_WG); ret = ENOMEM; } else if (node != aip->a_nodes) { free(aip, M_WG); aip = (struct wg_aip *)node; if (aip->a_peer != peer) { LIST_REMOVE(aip, a_entry); aip->a_peer->p_aips_num--; aip->a_peer = peer; LIST_INSERT_HEAD(&peer->p_aips, aip, a_entry); aip->a_peer->p_aips_num++; } } RADIX_NODE_HEAD_UNLOCK(root); return (ret); } static struct wg_peer * wg_aip_lookup(struct wg_softc *sc, sa_family_t af, void *a) { struct radix_node_head *root; struct radix_node *node; struct wg_peer *peer; struct aip_addr addr; RADIX_NODE_HEAD_RLOCK_TRACKER; switch (af) { case AF_INET: root = sc->sc_aip4; memcpy(&addr.in, a, sizeof(addr.in)); addr.length = offsetof(struct aip_addr, in) + sizeof(struct in_addr); break; case AF_INET6: root = sc->sc_aip6; memcpy(&addr.in6, a, sizeof(addr.in6)); addr.length = offsetof(struct aip_addr, in6) + sizeof(struct in6_addr); break; default: return NULL; } RADIX_NODE_HEAD_RLOCK(root); node = root->rnh_matchaddr(&addr, &root->rh); if (node != NULL) { peer = ((struct wg_aip *)node)->a_peer; noise_remote_ref(peer->p_remote); } else { peer = NULL; } RADIX_NODE_HEAD_RUNLOCK(root); return (peer); } static void wg_aip_remove_all(struct wg_softc *sc, struct wg_peer 
*peer) { struct wg_aip *aip, *taip; RADIX_NODE_HEAD_LOCK(sc->sc_aip4); LIST_FOREACH_SAFE(aip, &peer->p_aips, a_entry, taip) { if (aip->a_af == AF_INET) { if (sc->sc_aip4->rnh_deladdr(&aip->a_addr, &aip->a_mask, &sc->sc_aip4->rh) == NULL) panic("failed to delete aip %p", aip); LIST_REMOVE(aip, a_entry); peer->p_aips_num--; free(aip, M_WG); } } RADIX_NODE_HEAD_UNLOCK(sc->sc_aip4); RADIX_NODE_HEAD_LOCK(sc->sc_aip6); LIST_FOREACH_SAFE(aip, &peer->p_aips, a_entry, taip) { if (aip->a_af == AF_INET6) { if (sc->sc_aip6->rnh_deladdr(&aip->a_addr, &aip->a_mask, &sc->sc_aip6->rh) == NULL) panic("failed to delete aip %p", aip); LIST_REMOVE(aip, a_entry); peer->p_aips_num--; free(aip, M_WG); } } RADIX_NODE_HEAD_UNLOCK(sc->sc_aip6); if (!LIST_EMPTY(&peer->p_aips) || peer->p_aips_num != 0) panic("wg_aip_remove_all could not delete all %p", peer); } static int wg_socket_init(struct wg_softc *sc, in_port_t port) { struct ucred *cred = sc->sc_ucred; struct socket *so4 = NULL, *so6 = NULL; int rc; sx_assert(&sc->sc_lock, SX_XLOCKED); if (!cred) return (EBUSY); /* * For socket creation, we use the creds of the thread that created the * tunnel rather than the current thread to maintain the semantics that * WireGuard has on Linux with network namespaces -- that the sockets * are created in their home vnet so that they can be configured and * functionally attached to a foreign vnet as the jail's only interface * to the network. */ #ifdef INET rc = socreate(AF_INET, &so4, SOCK_DGRAM, IPPROTO_UDP, cred, curthread); if (rc) goto out; rc = udp_set_kernel_tunneling(so4, wg_input, NULL, sc); /* * udp_set_kernel_tunneling can only fail if there is already a tunneling function set. * This should never happen with a new socket. */ MPASS(rc == 0); #endif #ifdef INET6 rc = socreate(AF_INET6, &so6, SOCK_DGRAM, IPPROTO_UDP, cred, curthread); if (rc) goto out; rc = udp_set_kernel_tunneling(so6, wg_input, NULL, sc); MPASS(rc == 0); #endif if (sc->sc_socket.so_user_cookie) { rc = wg_socket_set_sockopt(so4, so6, SO_USER_COOKIE, &sc->sc_socket.so_user_cookie, sizeof(sc->sc_socket.so_user_cookie)); if (rc) goto out; } rc = wg_socket_set_sockopt(so4, so6, SO_SETFIB, &sc->sc_socket.so_fibnum, sizeof(sc->sc_socket.so_fibnum)); if (rc) goto out; rc = wg_socket_bind(&so4, &so6, &port); if (!rc) { sc->sc_socket.so_port = port; wg_socket_set(sc, so4, so6); } out: if (rc) { if (so4 != NULL) soclose(so4); if (so6 != NULL) soclose(so6); } return (rc); } static int wg_socket_set_sockopt(struct socket *so4, struct socket *so6, int name, void *val, size_t len) { int ret4 = 0, ret6 = 0; struct sockopt sopt = { .sopt_dir = SOPT_SET, .sopt_level = SOL_SOCKET, .sopt_name = name, .sopt_val = val, .sopt_valsize = len }; if (so4) ret4 = sosetopt(so4, &sopt); if (so6) ret6 = sosetopt(so6, &sopt); return (ret4 ?: ret6); } static int wg_socket_set_cookie(struct wg_softc *sc, uint32_t user_cookie) { struct wg_socket *so = &sc->sc_socket; int ret; sx_assert(&sc->sc_lock, SX_XLOCKED); ret = wg_socket_set_sockopt(so->so_so4, so->so_so6, SO_USER_COOKIE, &user_cookie, sizeof(user_cookie)); if (!ret) so->so_user_cookie = user_cookie; return (ret); } static int wg_socket_set_fibnum(struct wg_softc *sc, int fibnum) { struct wg_socket *so = &sc->sc_socket; int ret; sx_assert(&sc->sc_lock, SX_XLOCKED); ret = wg_socket_set_sockopt(so->so_so4, so->so_so6, SO_SETFIB, &fibnum, sizeof(fibnum)); if (!ret) so->so_fibnum = fibnum; return (ret); } static void wg_socket_uninit(struct wg_softc *sc) { wg_socket_set(sc, NULL, NULL); } static void wg_socket_set(struct wg_softc 
*sc, struct socket *new_so4, struct socket *new_so6) { struct wg_socket *so = &sc->sc_socket; struct socket *so4, *so6; sx_assert(&sc->sc_lock, SX_XLOCKED); so4 = atomic_load_ptr(&so->so_so4); so6 = atomic_load_ptr(&so->so_so6); atomic_store_ptr(&so->so_so4, new_so4); atomic_store_ptr(&so->so_so6, new_so6); if (!so4 && !so6) return; NET_EPOCH_WAIT(); if (so4) soclose(so4); if (so6) soclose(so6); } static int wg_socket_bind(struct socket **in_so4, struct socket **in_so6, in_port_t *requested_port) { struct socket *so4 = *in_so4, *so6 = *in_so6; int ret4 = 0, ret6 = 0; in_port_t port = *requested_port; struct sockaddr_in sin = { .sin_len = sizeof(struct sockaddr_in), .sin_family = AF_INET, .sin_port = htons(port) }; struct sockaddr_in6 sin6 = { .sin6_len = sizeof(struct sockaddr_in6), .sin6_family = AF_INET6, .sin6_port = htons(port) }; if (so4) { ret4 = sobind(so4, (struct sockaddr *)&sin, curthread); if (ret4 && ret4 != EADDRNOTAVAIL) return (ret4); if (!ret4 && !sin.sin_port) { - struct sockaddr_in *bound_sin; - int ret = so4->so_proto->pr_sockaddr(so4, - (struct sockaddr **)&bound_sin); + struct sockaddr_in bound_sin = + { .sin_len = sizeof(bound_sin) }; + int ret; + + ret = sosockaddr(so4, (struct sockaddr *)&bound_sin); if (ret) return (ret); - port = ntohs(bound_sin->sin_port); - sin6.sin6_port = bound_sin->sin_port; - free(bound_sin, M_SONAME); + port = ntohs(bound_sin.sin_port); + sin6.sin6_port = bound_sin.sin_port; } } if (so6) { ret6 = sobind(so6, (struct sockaddr *)&sin6, curthread); if (ret6 && ret6 != EADDRNOTAVAIL) return (ret6); if (!ret6 && !sin6.sin6_port) { - struct sockaddr_in6 *bound_sin6; - int ret = so6->so_proto->pr_sockaddr(so6, - (struct sockaddr **)&bound_sin6); + struct sockaddr_in6 bound_sin6 = + { .sin6_len = sizeof(bound_sin6) }; + int ret; + + ret = sosockaddr(so6, (struct sockaddr *)&bound_sin6); if (ret) return (ret); - port = ntohs(bound_sin6->sin6_port); - free(bound_sin6, M_SONAME); + port = ntohs(bound_sin6.sin6_port); } } if (ret4 && ret6) return (ret4); *requested_port = port; if (ret4 && !ret6 && so4) { soclose(so4); *in_so4 = NULL; } else if (ret6 && !ret4 && so6) { soclose(so6); *in_so6 = NULL; } return (0); } static int wg_send(struct wg_softc *sc, struct wg_endpoint *e, struct mbuf *m) { struct epoch_tracker et; struct sockaddr *sa; struct wg_socket *so = &sc->sc_socket; struct socket *so4, *so6; struct mbuf *control = NULL; int ret = 0; size_t len = m->m_pkthdr.len; /* Get local control address before locking */ if (e->e_remote.r_sa.sa_family == AF_INET) { if (e->e_local.l_in.s_addr != INADDR_ANY) control = sbcreatecontrol((caddr_t)&e->e_local.l_in, sizeof(struct in_addr), IP_SENDSRCADDR, IPPROTO_IP, M_NOWAIT); #ifdef INET6 } else if (e->e_remote.r_sa.sa_family == AF_INET6) { if (!IN6_IS_ADDR_UNSPECIFIED(&e->e_local.l_in6)) control = sbcreatecontrol((caddr_t)&e->e_local.l_pktinfo6, sizeof(struct in6_pktinfo), IPV6_PKTINFO, IPPROTO_IPV6, M_NOWAIT); #endif } else { m_freem(m); return (EAFNOSUPPORT); } /* Get remote address */ sa = &e->e_remote.r_sa; NET_EPOCH_ENTER(et); so4 = atomic_load_ptr(&so->so_so4); so6 = atomic_load_ptr(&so->so_so6); if (e->e_remote.r_sa.sa_family == AF_INET && so4 != NULL) ret = sosend(so4, sa, NULL, m, control, 0, curthread); else if (e->e_remote.r_sa.sa_family == AF_INET6 && so6 != NULL) ret = sosend(so6, sa, NULL, m, control, 0, curthread); else { ret = ENOTCONN; m_freem(control); m_freem(m); } NET_EPOCH_EXIT(et); if (ret == 0) { if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(sc->sc_ifp, 
IFCOUNTER_OBYTES, len); } return (ret); } static void wg_send_buf(struct wg_softc *sc, struct wg_endpoint *e, uint8_t *buf, size_t len) { struct mbuf *m; int ret = 0; bool retried = false; retry: m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR); if (!m) { ret = ENOMEM; goto out; } m_copyback(m, 0, len, buf); if (ret == 0) { ret = wg_send(sc, e, m); /* Retry if we couldn't bind to e->e_local */ if (ret == EADDRNOTAVAIL && !retried) { bzero(&e->e_local, sizeof(e->e_local)); retried = true; goto retry; } } else { ret = wg_send(sc, e, m); } out: if (ret) DPRINTF(sc, "Unable to send packet: %d\n", ret); } /* Timers */ static void wg_timers_enable(struct wg_peer *peer) { atomic_store_bool(&peer->p_enabled, true); wg_timers_run_persistent_keepalive(peer); } static void wg_timers_disable(struct wg_peer *peer) { /* By setting p_enabled = false, then calling NET_EPOCH_WAIT, we can be * sure no new handshakes are created after the wait. This is because * all callout_resets (scheduling the callout) are guarded by * p_enabled. We can be sure all sections that read p_enabled and then * optionally call callout_reset are finished as they are surrounded by * NET_EPOCH_{ENTER,EXIT}. * * However, as new callouts may be scheduled during NET_EPOCH_WAIT (but * not after), we stop all callouts leaving no callouts active. * * We should also pull NET_EPOCH_WAIT out of the FOREACH(peer) loops, but the * performance impact is acceptable for the time being. */ atomic_store_bool(&peer->p_enabled, false); NET_EPOCH_WAIT(); atomic_store_bool(&peer->p_need_another_keepalive, false); callout_stop(&peer->p_new_handshake); callout_stop(&peer->p_send_keepalive); callout_stop(&peer->p_retry_handshake); callout_stop(&peer->p_persistent_keepalive); callout_stop(&peer->p_zero_key_material); } static void wg_timers_set_persistent_keepalive(struct wg_peer *peer, uint16_t interval) { struct epoch_tracker et; if (interval != peer->p_persistent_keepalive_interval) { atomic_store_16(&peer->p_persistent_keepalive_interval, interval); NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled)) wg_timers_run_persistent_keepalive(peer); NET_EPOCH_EXIT(et); } } static void wg_timers_get_last_handshake(struct wg_peer *peer, struct wg_timespec64 *time) { mtx_lock(&peer->p_handshake_mtx); time->tv_sec = peer->p_handshake_complete.tv_sec; time->tv_nsec = peer->p_handshake_complete.tv_nsec; mtx_unlock(&peer->p_handshake_mtx); } static void wg_timers_event_data_sent(struct wg_peer *peer) { struct epoch_tracker et; NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake)) callout_reset(&peer->p_new_handshake, MSEC_2_TICKS( NEW_HANDSHAKE_TIMEOUT * 1000 + arc4random_uniform(REKEY_TIMEOUT_JITTER)), wg_timers_run_new_handshake, peer); NET_EPOCH_EXIT(et); } static void wg_timers_event_data_received(struct wg_peer *peer) { struct epoch_tracker et; NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled)) { if (!callout_pending(&peer->p_send_keepalive)) callout_reset(&peer->p_send_keepalive, MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000), wg_timers_run_send_keepalive, peer); else atomic_store_bool(&peer->p_need_another_keepalive, true); } NET_EPOCH_EXIT(et); } static void wg_timers_event_any_authenticated_packet_sent(struct wg_peer *peer) { callout_stop(&peer->p_send_keepalive); } static void wg_timers_event_any_authenticated_packet_received(struct wg_peer *peer) { callout_stop(&peer->p_new_handshake); } static void wg_timers_event_any_authenticated_packet_traversal(struct wg_peer *peer) { struct epoch_tracker et; 
uint16_t interval; NET_EPOCH_ENTER(et); interval = atomic_load_16(&peer->p_persistent_keepalive_interval); if (atomic_load_bool(&peer->p_enabled) && interval > 0) callout_reset(&peer->p_persistent_keepalive, MSEC_2_TICKS(interval * 1000), wg_timers_run_persistent_keepalive, peer); NET_EPOCH_EXIT(et); } static void wg_timers_event_handshake_initiated(struct wg_peer *peer) { struct epoch_tracker et; NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled)) callout_reset(&peer->p_retry_handshake, MSEC_2_TICKS( REKEY_TIMEOUT * 1000 + arc4random_uniform(REKEY_TIMEOUT_JITTER)), wg_timers_run_retry_handshake, peer); NET_EPOCH_EXIT(et); } static void wg_timers_event_handshake_complete(struct wg_peer *peer) { struct epoch_tracker et; NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled)) { mtx_lock(&peer->p_handshake_mtx); callout_stop(&peer->p_retry_handshake); peer->p_handshake_retries = 0; getnanotime(&peer->p_handshake_complete); mtx_unlock(&peer->p_handshake_mtx); wg_timers_run_send_keepalive(peer); } NET_EPOCH_EXIT(et); } static void wg_timers_event_session_derived(struct wg_peer *peer) { struct epoch_tracker et; NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled)) callout_reset(&peer->p_zero_key_material, MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000), wg_timers_run_zero_key_material, peer); NET_EPOCH_EXIT(et); } static void wg_timers_event_want_initiation(struct wg_peer *peer) { struct epoch_tracker et; NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled)) wg_timers_run_send_initiation(peer, false); NET_EPOCH_EXIT(et); } static void wg_timers_run_send_initiation(struct wg_peer *peer, bool is_retry) { if (!is_retry) peer->p_handshake_retries = 0; if (noise_remote_initiation_expired(peer->p_remote) == ETIMEDOUT) wg_send_initiation(peer); } static void wg_timers_run_retry_handshake(void *_peer) { struct epoch_tracker et; struct wg_peer *peer = _peer; mtx_lock(&peer->p_handshake_mtx); if (peer->p_handshake_retries <= MAX_TIMER_HANDSHAKES) { peer->p_handshake_retries++; mtx_unlock(&peer->p_handshake_mtx); DPRINTF(peer->p_sc, "Handshake for peer %" PRIu64 " did not complete " "after %d seconds, retrying (try %d)\n", peer->p_id, REKEY_TIMEOUT, peer->p_handshake_retries + 1); wg_peer_clear_src(peer); wg_timers_run_send_initiation(peer, true); } else { mtx_unlock(&peer->p_handshake_mtx); DPRINTF(peer->p_sc, "Handshake for peer %" PRIu64 " did not complete " "after %d retries, giving up\n", peer->p_id, MAX_TIMER_HANDSHAKES + 2); callout_stop(&peer->p_send_keepalive); wg_queue_purge(&peer->p_stage_queue); NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_zero_key_material)) callout_reset(&peer->p_zero_key_material, MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000), wg_timers_run_zero_key_material, peer); NET_EPOCH_EXIT(et); } } static void wg_timers_run_send_keepalive(void *_peer) { struct epoch_tracker et; struct wg_peer *peer = _peer; wg_send_keepalive(peer); NET_EPOCH_ENTER(et); if (atomic_load_bool(&peer->p_enabled) && atomic_load_bool(&peer->p_need_another_keepalive)) { atomic_store_bool(&peer->p_need_another_keepalive, false); callout_reset(&peer->p_send_keepalive, MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000), wg_timers_run_send_keepalive, peer); } NET_EPOCH_EXIT(et); } static void wg_timers_run_new_handshake(void *_peer) { struct wg_peer *peer = _peer; DPRINTF(peer->p_sc, "Retrying handshake with peer %" PRIu64 " because we " "stopped hearing back after %d seconds\n", peer->p_id, NEW_HANDSHAKE_TIMEOUT); wg_peer_clear_src(peer); 
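/* Drop the cached local address so the retried initiation can pick a fresh source; wg_deliver_out() does the same when a send fails with EADDRNOTAVAIL. */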
wg_timers_run_send_initiation(peer, false); } static void wg_timers_run_zero_key_material(void *_peer) { struct wg_peer *peer = _peer; DPRINTF(peer->p_sc, "Zeroing out keys for peer %" PRIu64 ", since we " "haven't received a new one in %d seconds\n", peer->p_id, REJECT_AFTER_TIME * 3); noise_remote_keypairs_clear(peer->p_remote); } static void wg_timers_run_persistent_keepalive(void *_peer) { struct wg_peer *peer = _peer; if (atomic_load_16(&peer->p_persistent_keepalive_interval) > 0) wg_send_keepalive(peer); } /* TODO Handshake */ static void wg_peer_send_buf(struct wg_peer *peer, uint8_t *buf, size_t len) { struct wg_endpoint endpoint; counter_u64_add(peer->p_tx_bytes, len); wg_timers_event_any_authenticated_packet_traversal(peer); wg_timers_event_any_authenticated_packet_sent(peer); wg_peer_get_endpoint(peer, &endpoint); wg_send_buf(peer->p_sc, &endpoint, buf, len); } static void wg_send_initiation(struct wg_peer *peer) { struct wg_pkt_initiation pkt; if (noise_create_initiation(peer->p_remote, &pkt.s_idx, pkt.ue, pkt.es, pkt.ets) != 0) return; DPRINTF(peer->p_sc, "Sending handshake initiation to peer %" PRIu64 "\n", peer->p_id); pkt.t = WG_PKT_INITIATION; cookie_maker_mac(&peer->p_cookie, &pkt.m, &pkt, sizeof(pkt) - sizeof(pkt.m)); wg_peer_send_buf(peer, (uint8_t *)&pkt, sizeof(pkt)); wg_timers_event_handshake_initiated(peer); } static void wg_send_response(struct wg_peer *peer) { struct wg_pkt_response pkt; if (noise_create_response(peer->p_remote, &pkt.s_idx, &pkt.r_idx, pkt.ue, pkt.en) != 0) return; DPRINTF(peer->p_sc, "Sending handshake response to peer %" PRIu64 "\n", peer->p_id); wg_timers_event_session_derived(peer); pkt.t = WG_PKT_RESPONSE; cookie_maker_mac(&peer->p_cookie, &pkt.m, &pkt, sizeof(pkt)-sizeof(pkt.m)); wg_peer_send_buf(peer, (uint8_t*)&pkt, sizeof(pkt)); } static void wg_send_cookie(struct wg_softc *sc, struct cookie_macs *cm, uint32_t idx, struct wg_endpoint *e) { struct wg_pkt_cookie pkt; DPRINTF(sc, "Sending cookie response for denied handshake message\n"); pkt.t = WG_PKT_COOKIE; pkt.r_idx = idx; cookie_checker_create_payload(&sc->sc_cookie, cm, pkt.nonce, pkt.ec, &e->e_remote.r_sa); wg_send_buf(sc, e, (uint8_t *)&pkt, sizeof(pkt)); } static void wg_send_keepalive(struct wg_peer *peer) { struct wg_packet *pkt; struct mbuf *m; if (wg_queue_len(&peer->p_stage_queue) > 0) goto send; if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL) return; if ((pkt = wg_packet_alloc(m)) == NULL) { m_freem(m); return; } wg_queue_push_staged(&peer->p_stage_queue, pkt); DPRINTF(peer->p_sc, "Sending keepalive packet to peer %" PRIu64 "\n", peer->p_id); send: wg_peer_send_staged(peer); } static void wg_handshake(struct wg_softc *sc, struct wg_packet *pkt) { struct wg_pkt_initiation *init; struct wg_pkt_response *resp; struct wg_pkt_cookie *cook; struct wg_endpoint *e; struct wg_peer *peer; struct mbuf *m; struct noise_remote *remote = NULL; int res; bool underload = false; static sbintime_t wg_last_underload; /* sbinuptime */ underload = wg_queue_len(&sc->sc_handshake_queue) >= MAX_QUEUED_HANDSHAKES / 8; if (underload) { wg_last_underload = getsbinuptime(); } else if (wg_last_underload) { underload = wg_last_underload + UNDERLOAD_TIMEOUT * SBT_1S > getsbinuptime(); if (!underload) wg_last_underload = 0; } m = pkt->p_mbuf; e = &pkt->p_endpoint; if ((pkt->p_mbuf = m = m_pullup(m, m->m_pkthdr.len)) == NULL) goto error; switch (*mtod(m, uint32_t *)) { case WG_PKT_INITIATION: init = mtod(m, struct wg_pkt_initiation *); res = cookie_checker_validate_macs(&sc->sc_cookie, &init->m, init, 
sizeof(*init) - sizeof(init->m), underload, &e->e_remote.r_sa, if_getvnet(sc->sc_ifp)); if (res == EINVAL) { DPRINTF(sc, "Invalid initiation MAC\n"); goto error; } else if (res == ECONNREFUSED) { DPRINTF(sc, "Handshake ratelimited\n"); goto error; } else if (res == EAGAIN) { wg_send_cookie(sc, &init->m, init->s_idx, e); goto error; } else if (res != 0) { panic("unexpected response: %d\n", res); } if (noise_consume_initiation(sc->sc_local, &remote, init->s_idx, init->ue, init->es, init->ets) != 0) { DPRINTF(sc, "Invalid handshake initiation\n"); goto error; } peer = noise_remote_arg(remote); DPRINTF(sc, "Receiving handshake initiation from peer %" PRIu64 "\n", peer->p_id); wg_peer_set_endpoint(peer, e); wg_send_response(peer); break; case WG_PKT_RESPONSE: resp = mtod(m, struct wg_pkt_response *); res = cookie_checker_validate_macs(&sc->sc_cookie, &resp->m, resp, sizeof(*resp) - sizeof(resp->m), underload, &e->e_remote.r_sa, if_getvnet(sc->sc_ifp)); if (res == EINVAL) { DPRINTF(sc, "Invalid response MAC\n"); goto error; } else if (res == ECONNREFUSED) { DPRINTF(sc, "Handshake ratelimited\n"); goto error; } else if (res == EAGAIN) { wg_send_cookie(sc, &resp->m, resp->s_idx, e); goto error; } else if (res != 0) { panic("unexpected response: %d\n", res); } if (noise_consume_response(sc->sc_local, &remote, resp->s_idx, resp->r_idx, resp->ue, resp->en) != 0) { DPRINTF(sc, "Invalid handshake response\n"); goto error; } peer = noise_remote_arg(remote); DPRINTF(sc, "Receiving handshake response from peer %" PRIu64 "\n", peer->p_id); wg_peer_set_endpoint(peer, e); wg_timers_event_session_derived(peer); wg_timers_event_handshake_complete(peer); break; case WG_PKT_COOKIE: cook = mtod(m, struct wg_pkt_cookie *); if ((remote = noise_remote_index(sc->sc_local, cook->r_idx)) == NULL) { DPRINTF(sc, "Unknown cookie index\n"); goto error; } peer = noise_remote_arg(remote); if (cookie_maker_consume_payload(&peer->p_cookie, cook->nonce, cook->ec) == 0) { DPRINTF(sc, "Receiving cookie response\n"); } else { DPRINTF(sc, "Could not decrypt cookie response\n"); goto error; } goto not_authenticated; default: panic("invalid packet in handshake queue"); } wg_timers_event_any_authenticated_packet_received(peer); wg_timers_event_any_authenticated_packet_traversal(peer); not_authenticated: counter_u64_add(peer->p_rx_bytes, m->m_pkthdr.len); if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); error: if (remote != NULL) noise_remote_put(remote); wg_packet_free(pkt); } static void wg_softc_handshake_receive(struct wg_softc *sc) { struct wg_packet *pkt; while ((pkt = wg_queue_dequeue_handshake(&sc->sc_handshake_queue)) != NULL) wg_handshake(sc, pkt); } static void wg_mbuf_reset(struct mbuf *m) { struct m_tag *t, *tmp; /* * We want to reset the mbuf to a newly allocated state, containing * just the packet contents. Unfortunately FreeBSD doesn't seem to * offer this anywhere, so we have to make it up as we go. If we can * get this in kern/kern_mbuf.c, that would be best. * * Notice: this may break things unexpectedly but it is better to fail * closed in the extreme case than leak informtion in every * case. * * With that said, all this attempts to do is remove any extraneous * information that could be present. 
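 * Concretely, the tag loop below keeps only the wg loop-detection tag (cookie MTAG_WGLOOP) and PACKET_TAG_MACLABEL, and the rest of the function clears the flow hash, checksum flags and the per-protocol pkthdr scratch fields.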
*/ M_ASSERTPKTHDR(m); m->m_flags &= ~(M_BCAST|M_MCAST|M_VLANTAG|M_PROMISC|M_PROTOFLAGS); M_HASHTYPE_CLEAR(m); #ifdef NUMA m->m_pkthdr.numa_domain = M_NODOM; #endif SLIST_FOREACH_SAFE(t, &m->m_pkthdr.tags, m_tag_link, tmp) { if ((t->m_tag_id != 0 || t->m_tag_cookie != MTAG_WGLOOP) && t->m_tag_id != PACKET_TAG_MACLABEL) m_tag_delete(m, t); } KASSERT((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0, ("%s: mbuf %p has a send tag", __func__, m)); m->m_pkthdr.csum_flags = 0; m->m_pkthdr.PH_per.sixtyfour[0] = 0; m->m_pkthdr.PH_loc.sixtyfour[0] = 0; } static inline unsigned int calculate_padding(struct wg_packet *pkt) { unsigned int padded_size, last_unit = pkt->p_mbuf->m_pkthdr.len; if (__predict_false(!pkt->p_mtu)) return (last_unit + (WG_PKT_PADDING - 1)) & ~(WG_PKT_PADDING - 1); if (__predict_false(last_unit > pkt->p_mtu)) last_unit %= pkt->p_mtu; padded_size = (last_unit + (WG_PKT_PADDING - 1)) & ~(WG_PKT_PADDING - 1); if (pkt->p_mtu < padded_size) padded_size = pkt->p_mtu; return padded_size - last_unit; } static void wg_encrypt(struct wg_softc *sc, struct wg_packet *pkt) { static const uint8_t padding[WG_PKT_PADDING] = { 0 }; struct wg_pkt_data *data; struct wg_peer *peer; struct noise_remote *remote; struct mbuf *m; uint32_t idx; unsigned int padlen; enum wg_ring_state state = WG_PACKET_DEAD; remote = noise_keypair_remote(pkt->p_keypair); peer = noise_remote_arg(remote); m = pkt->p_mbuf; /* Pad the packet */ padlen = calculate_padding(pkt); if (padlen != 0 && !m_append(m, padlen, padding)) goto out; /* Do encryption */ if (noise_keypair_encrypt(pkt->p_keypair, &idx, pkt->p_nonce, m) != 0) goto out; /* Put header into packet */ M_PREPEND(m, sizeof(struct wg_pkt_data), M_NOWAIT); if (m == NULL) goto out; data = mtod(m, struct wg_pkt_data *); data->t = WG_PKT_DATA; data->r_idx = idx; data->nonce = htole64(pkt->p_nonce); wg_mbuf_reset(m); state = WG_PACKET_CRYPTED; out: pkt->p_mbuf = m; wmb(); pkt->p_state = state; GROUPTASK_ENQUEUE(&peer->p_send); noise_remote_put(remote); } static void wg_decrypt(struct wg_softc *sc, struct wg_packet *pkt) { struct wg_peer *peer, *allowed_peer; struct noise_remote *remote; struct mbuf *m; int len; enum wg_ring_state state = WG_PACKET_DEAD; remote = noise_keypair_remote(pkt->p_keypair); peer = noise_remote_arg(remote); m = pkt->p_mbuf; /* Read nonce and then adjust to remove the header. */ pkt->p_nonce = le64toh(mtod(m, struct wg_pkt_data *)->nonce); m_adj(m, sizeof(struct wg_pkt_data)); if (noise_keypair_decrypt(pkt->p_keypair, pkt->p_nonce, m) != 0) goto out; /* A packet with length 0 is a keepalive packet */ if (__predict_false(m->m_pkthdr.len == 0)) { DPRINTF(sc, "Receiving keepalive packet from peer " "%" PRIu64 "\n", peer->p_id); state = WG_PACKET_CRYPTED; goto out; } /* * We can let the network stack handle the intricate validation of the * IP header, we just worry about the sizeof and the version, so we can * read the source address in wg_aip_lookup. 
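 * The lookup that follows implements cryptokey routing on the receive side: the decrypted inner packet's source address must resolve, via the allowed-IPs table, to the same peer that owns the keypair, otherwise the packet is dropped.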
*/ if (determine_af_and_pullup(&m, &pkt->p_af) == 0) { if (pkt->p_af == AF_INET) { struct ip *ip = mtod(m, struct ip *); allowed_peer = wg_aip_lookup(sc, AF_INET, &ip->ip_src); len = ntohs(ip->ip_len); if (len >= sizeof(struct ip) && len < m->m_pkthdr.len) m_adj(m, len - m->m_pkthdr.len); } else if (pkt->p_af == AF_INET6) { struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); allowed_peer = wg_aip_lookup(sc, AF_INET6, &ip6->ip6_src); len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr); if (len < m->m_pkthdr.len) m_adj(m, len - m->m_pkthdr.len); } else panic("determine_af_and_pullup returned unexpected value"); } else { DPRINTF(sc, "Packet is neither ipv4 nor ipv6 from peer %" PRIu64 "\n", peer->p_id); goto out; } /* We only want to compare the address, not dereference, so drop the ref. */ if (allowed_peer != NULL) noise_remote_put(allowed_peer->p_remote); if (__predict_false(peer != allowed_peer)) { DPRINTF(sc, "Packet has unallowed src IP from peer %" PRIu64 "\n", peer->p_id); goto out; } wg_mbuf_reset(m); state = WG_PACKET_CRYPTED; out: pkt->p_mbuf = m; wmb(); pkt->p_state = state; GROUPTASK_ENQUEUE(&peer->p_recv); noise_remote_put(remote); } static void wg_softc_decrypt(struct wg_softc *sc) { struct wg_packet *pkt; while ((pkt = wg_queue_dequeue_parallel(&sc->sc_decrypt_parallel)) != NULL) wg_decrypt(sc, pkt); } static void wg_softc_encrypt(struct wg_softc *sc) { struct wg_packet *pkt; while ((pkt = wg_queue_dequeue_parallel(&sc->sc_encrypt_parallel)) != NULL) wg_encrypt(sc, pkt); } static void wg_encrypt_dispatch(struct wg_softc *sc) { /* * The update to encrypt_last_cpu is racey such that we may * reschedule the task for the same CPU multiple times, but * the race doesn't really matter. */ u_int cpu = (sc->sc_encrypt_last_cpu + 1) % mp_ncpus; sc->sc_encrypt_last_cpu = cpu; GROUPTASK_ENQUEUE(&sc->sc_encrypt[cpu]); } static void wg_decrypt_dispatch(struct wg_softc *sc) { u_int cpu = (sc->sc_decrypt_last_cpu + 1) % mp_ncpus; sc->sc_decrypt_last_cpu = cpu; GROUPTASK_ENQUEUE(&sc->sc_decrypt[cpu]); } static void wg_deliver_out(struct wg_peer *peer) { struct wg_endpoint endpoint; struct wg_softc *sc = peer->p_sc; struct wg_packet *pkt; struct mbuf *m; int rc, len; wg_peer_get_endpoint(peer, &endpoint); while ((pkt = wg_queue_dequeue_serial(&peer->p_encrypt_serial)) != NULL) { if (pkt->p_state != WG_PACKET_CRYPTED) goto error; m = pkt->p_mbuf; pkt->p_mbuf = NULL; len = m->m_pkthdr.len; wg_timers_event_any_authenticated_packet_traversal(peer); wg_timers_event_any_authenticated_packet_sent(peer); rc = wg_send(sc, &endpoint, m); if (rc == 0) { if (len > (sizeof(struct wg_pkt_data) + NOISE_AUTHTAG_LEN)) wg_timers_event_data_sent(peer); counter_u64_add(peer->p_tx_bytes, len); } else if (rc == EADDRNOTAVAIL) { wg_peer_clear_src(peer); wg_peer_get_endpoint(peer, &endpoint); goto error; } else { goto error; } wg_packet_free(pkt); if (noise_keep_key_fresh_send(peer->p_remote)) wg_timers_event_want_initiation(peer); continue; error: if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); wg_packet_free(pkt); } } static void wg_deliver_in(struct wg_peer *peer) { struct wg_softc *sc = peer->p_sc; if_t ifp = sc->sc_ifp; struct wg_packet *pkt; struct mbuf *m; struct epoch_tracker et; while ((pkt = wg_queue_dequeue_serial(&peer->p_decrypt_serial)) != NULL) { if (pkt->p_state != WG_PACKET_CRYPTED) goto error; m = pkt->p_mbuf; if (noise_keypair_nonce_check(pkt->p_keypair, pkt->p_nonce) != 0) goto error; if (noise_keypair_received_with(pkt->p_keypair) == ECONNRESET) wg_timers_event_handshake_complete(peer); 
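/* ECONNRESET here appears to mark the first packet received on a keypair we initiated, i.e. confirmation that the peer is using the new session, so the handshake is treated as complete. */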
wg_timers_event_any_authenticated_packet_received(peer); wg_timers_event_any_authenticated_packet_traversal(peer); wg_peer_set_endpoint(peer, &pkt->p_endpoint); counter_u64_add(peer->p_rx_bytes, m->m_pkthdr.len + sizeof(struct wg_pkt_data) + NOISE_AUTHTAG_LEN); if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len + sizeof(struct wg_pkt_data) + NOISE_AUTHTAG_LEN); if (m->m_pkthdr.len == 0) goto done; MPASS(pkt->p_af == AF_INET || pkt->p_af == AF_INET6); pkt->p_mbuf = NULL; m->m_pkthdr.rcvif = ifp; NET_EPOCH_ENTER(et); BPF_MTAP2_AF(ifp, m, pkt->p_af); CURVNET_SET(if_getvnet(ifp)); M_SETFIB(m, if_getfib(ifp)); if (pkt->p_af == AF_INET) netisr_dispatch(NETISR_IP, m); if (pkt->p_af == AF_INET6) netisr_dispatch(NETISR_IPV6, m); CURVNET_RESTORE(); NET_EPOCH_EXIT(et); wg_timers_event_data_received(peer); done: if (noise_keep_key_fresh_recv(peer->p_remote)) wg_timers_event_want_initiation(peer); wg_packet_free(pkt); continue; error: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); wg_packet_free(pkt); } } static struct wg_packet * wg_packet_alloc(struct mbuf *m) { struct wg_packet *pkt; if ((pkt = uma_zalloc(wg_packet_zone, M_NOWAIT | M_ZERO)) == NULL) return (NULL); pkt->p_mbuf = m; return (pkt); } static void wg_packet_free(struct wg_packet *pkt) { if (pkt->p_keypair != NULL) noise_keypair_put(pkt->p_keypair); if (pkt->p_mbuf != NULL) m_freem(pkt->p_mbuf); uma_zfree(wg_packet_zone, pkt); } static void wg_queue_init(struct wg_queue *queue, const char *name) { mtx_init(&queue->q_mtx, name, NULL, MTX_DEF); STAILQ_INIT(&queue->q_queue); queue->q_len = 0; } static void wg_queue_deinit(struct wg_queue *queue) { wg_queue_purge(queue); mtx_destroy(&queue->q_mtx); } static size_t wg_queue_len(struct wg_queue *queue) { return (queue->q_len); } static int wg_queue_enqueue_handshake(struct wg_queue *hs, struct wg_packet *pkt) { int ret = 0; mtx_lock(&hs->q_mtx); if (hs->q_len < MAX_QUEUED_HANDSHAKES) { STAILQ_INSERT_TAIL(&hs->q_queue, pkt, p_parallel); hs->q_len++; } else { ret = ENOBUFS; } mtx_unlock(&hs->q_mtx); if (ret != 0) wg_packet_free(pkt); return (ret); } static struct wg_packet * wg_queue_dequeue_handshake(struct wg_queue *hs) { struct wg_packet *pkt; mtx_lock(&hs->q_mtx); if ((pkt = STAILQ_FIRST(&hs->q_queue)) != NULL) { STAILQ_REMOVE_HEAD(&hs->q_queue, p_parallel); hs->q_len--; } mtx_unlock(&hs->q_mtx); return (pkt); } static void wg_queue_push_staged(struct wg_queue *staged, struct wg_packet *pkt) { struct wg_packet *old = NULL; mtx_lock(&staged->q_mtx); if (staged->q_len >= MAX_STAGED_PKT) { old = STAILQ_FIRST(&staged->q_queue); STAILQ_REMOVE_HEAD(&staged->q_queue, p_parallel); staged->q_len--; } STAILQ_INSERT_TAIL(&staged->q_queue, pkt, p_parallel); staged->q_len++; mtx_unlock(&staged->q_mtx); if (old != NULL) wg_packet_free(old); } static void wg_queue_enlist_staged(struct wg_queue *staged, struct wg_packet_list *list) { struct wg_packet *pkt, *tpkt; STAILQ_FOREACH_SAFE(pkt, list, p_parallel, tpkt) wg_queue_push_staged(staged, pkt); } static void wg_queue_delist_staged(struct wg_queue *staged, struct wg_packet_list *list) { STAILQ_INIT(list); mtx_lock(&staged->q_mtx); STAILQ_CONCAT(list, &staged->q_queue); staged->q_len = 0; mtx_unlock(&staged->q_mtx); } static void wg_queue_purge(struct wg_queue *staged) { struct wg_packet_list list; struct wg_packet *pkt, *tpkt; wg_queue_delist_staged(staged, &list); STAILQ_FOREACH_SAFE(pkt, &list, p_parallel, tpkt) wg_packet_free(pkt); } static int wg_queue_both(struct wg_queue *parallel, struct wg_queue 
*serial, struct wg_packet *pkt) { pkt->p_state = WG_PACKET_UNCRYPTED; mtx_lock(&serial->q_mtx); if (serial->q_len < MAX_QUEUED_PKT) { serial->q_len++; STAILQ_INSERT_TAIL(&serial->q_queue, pkt, p_serial); } else { mtx_unlock(&serial->q_mtx); wg_packet_free(pkt); return (ENOBUFS); } mtx_unlock(&serial->q_mtx); mtx_lock(&parallel->q_mtx); if (parallel->q_len < MAX_QUEUED_PKT) { parallel->q_len++; STAILQ_INSERT_TAIL(&parallel->q_queue, pkt, p_parallel); } else { mtx_unlock(&parallel->q_mtx); pkt->p_state = WG_PACKET_DEAD; return (ENOBUFS); } mtx_unlock(&parallel->q_mtx); return (0); } static struct wg_packet * wg_queue_dequeue_serial(struct wg_queue *serial) { struct wg_packet *pkt = NULL; mtx_lock(&serial->q_mtx); if (serial->q_len > 0 && STAILQ_FIRST(&serial->q_queue)->p_state != WG_PACKET_UNCRYPTED) { serial->q_len--; pkt = STAILQ_FIRST(&serial->q_queue); STAILQ_REMOVE_HEAD(&serial->q_queue, p_serial); } mtx_unlock(&serial->q_mtx); return (pkt); } static struct wg_packet * wg_queue_dequeue_parallel(struct wg_queue *parallel) { struct wg_packet *pkt = NULL; mtx_lock(&parallel->q_mtx); if (parallel->q_len > 0) { parallel->q_len--; pkt = STAILQ_FIRST(&parallel->q_queue); STAILQ_REMOVE_HEAD(&parallel->q_queue, p_parallel); } mtx_unlock(&parallel->q_mtx); return (pkt); } static bool wg_input(struct mbuf *m, int offset, struct inpcb *inpcb, const struct sockaddr *sa, void *_sc) { #ifdef INET const struct sockaddr_in *sin; #endif #ifdef INET6 const struct sockaddr_in6 *sin6; #endif struct noise_remote *remote; struct wg_pkt_data *data; struct wg_packet *pkt; struct wg_peer *peer; struct wg_softc *sc = _sc; struct mbuf *defragged; defragged = m_defrag(m, M_NOWAIT); if (defragged) m = defragged; m = m_unshare(m, M_NOWAIT); if (!m) { if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); return true; } /* Caller provided us with `sa`, no need for this header. */ m_adj(m, offset + sizeof(struct udphdr)); /* Pullup enough to read packet type */ if ((m = m_pullup(m, sizeof(uint32_t))) == NULL) { if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); return true; } if ((pkt = wg_packet_alloc(m)) == NULL) { if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); m_freem(m); return true; } /* Save send/recv address and port for later. */ switch (sa->sa_family) { #ifdef INET case AF_INET: sin = (const struct sockaddr_in *)sa; pkt->p_endpoint.e_remote.r_sin = sin[0]; pkt->p_endpoint.e_local.l_in = sin[1].sin_addr; break; #endif #ifdef INET6 case AF_INET6: sin6 = (const struct sockaddr_in6 *)sa; pkt->p_endpoint.e_remote.r_sin6 = sin6[0]; pkt->p_endpoint.e_local.l_in6 = sin6[1].sin6_addr; break; #endif default: goto error; } if ((m->m_pkthdr.len == sizeof(struct wg_pkt_initiation) && *mtod(m, uint32_t *) == WG_PKT_INITIATION) || (m->m_pkthdr.len == sizeof(struct wg_pkt_response) && *mtod(m, uint32_t *) == WG_PKT_RESPONSE) || (m->m_pkthdr.len == sizeof(struct wg_pkt_cookie) && *mtod(m, uint32_t *) == WG_PKT_COOKIE)) { if (wg_queue_enqueue_handshake(&sc->sc_handshake_queue, pkt) != 0) { if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); DPRINTF(sc, "Dropping handshake packet\n"); } GROUPTASK_ENQUEUE(&sc->sc_handshake); } else if (m->m_pkthdr.len >= sizeof(struct wg_pkt_data) + NOISE_AUTHTAG_LEN && *mtod(m, uint32_t *) == WG_PKT_DATA) { /* Pullup whole header to read r_idx below.
*/ if ((pkt->p_mbuf = m_pullup(m, sizeof(struct wg_pkt_data))) == NULL) goto error; data = mtod(pkt->p_mbuf, struct wg_pkt_data *); if ((pkt->p_keypair = noise_keypair_lookup(sc->sc_local, data->r_idx)) == NULL) goto error; remote = noise_keypair_remote(pkt->p_keypair); peer = noise_remote_arg(remote); if (wg_queue_both(&sc->sc_decrypt_parallel, &peer->p_decrypt_serial, pkt) != 0) if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); wg_decrypt_dispatch(sc); noise_remote_put(remote); } else { goto error; } return true; error: if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); wg_packet_free(pkt); return true; } static void wg_peer_send_staged(struct wg_peer *peer) { struct wg_packet_list list; struct noise_keypair *keypair; struct wg_packet *pkt, *tpkt; struct wg_softc *sc = peer->p_sc; wg_queue_delist_staged(&peer->p_stage_queue, &list); if (STAILQ_EMPTY(&list)) return; if ((keypair = noise_keypair_current(peer->p_remote)) == NULL) goto error; STAILQ_FOREACH(pkt, &list, p_parallel) { if (noise_keypair_nonce_next(keypair, &pkt->p_nonce) != 0) goto error_keypair; } STAILQ_FOREACH_SAFE(pkt, &list, p_parallel, tpkt) { pkt->p_keypair = noise_keypair_ref(keypair); if (wg_queue_both(&sc->sc_encrypt_parallel, &peer->p_encrypt_serial, pkt) != 0) if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1); } wg_encrypt_dispatch(sc); noise_keypair_put(keypair); return; error_keypair: noise_keypair_put(keypair); error: wg_queue_enlist_staged(&peer->p_stage_queue, &list); wg_timers_event_want_initiation(peer); } static inline void xmit_err(if_t ifp, struct mbuf *m, struct wg_packet *pkt, sa_family_t af) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); switch (af) { #ifdef INET case AF_INET: icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0); if (pkt) pkt->p_mbuf = NULL; m = NULL; break; #endif #ifdef INET6 case AF_INET6: icmp6_error(m, ICMP6_DST_UNREACH, 0, 0); if (pkt) pkt->p_mbuf = NULL; m = NULL; break; #endif } if (pkt) wg_packet_free(pkt); else if (m) m_freem(m); } static int wg_xmit(if_t ifp, struct mbuf *m, sa_family_t af, uint32_t mtu) { struct wg_packet *pkt = NULL; struct wg_softc *sc = if_getsoftc(ifp); struct wg_peer *peer; int rc = 0; sa_family_t peer_af; /* Work around lifetime issue in the ipv6 mld code. 
*/ if (__predict_false((if_getflags(ifp) & IFF_DYING) || !sc)) { rc = ENXIO; goto err_xmit; } if ((pkt = wg_packet_alloc(m)) == NULL) { rc = ENOBUFS; goto err_xmit; } pkt->p_mtu = mtu; pkt->p_af = af; if (af == AF_INET) { peer = wg_aip_lookup(sc, AF_INET, &mtod(m, struct ip *)->ip_dst); } else if (af == AF_INET6) { peer = wg_aip_lookup(sc, AF_INET6, &mtod(m, struct ip6_hdr *)->ip6_dst); } else { rc = EAFNOSUPPORT; goto err_xmit; } BPF_MTAP2_AF(ifp, m, pkt->p_af); if (__predict_false(peer == NULL)) { rc = ENOKEY; goto err_xmit; } if (__predict_false(if_tunnel_check_nesting(ifp, m, MTAG_WGLOOP, MAX_LOOPS))) { DPRINTF(sc, "Packet looped"); rc = ELOOP; goto err_peer; } peer_af = peer->p_endpoint.e_remote.r_sa.sa_family; if (__predict_false(peer_af != AF_INET && peer_af != AF_INET6)) { DPRINTF(sc, "No valid endpoint has been configured or " "discovered for peer %" PRIu64 "\n", peer->p_id); rc = EHOSTUNREACH; goto err_peer; } wg_queue_push_staged(&peer->p_stage_queue, pkt); wg_peer_send_staged(peer); noise_remote_put(peer->p_remote); return (0); err_peer: noise_remote_put(peer->p_remote); err_xmit: xmit_err(ifp, m, pkt, af); return (rc); } static inline int determine_af_and_pullup(struct mbuf **m, sa_family_t *af) { u_char ipv; if ((*m)->m_pkthdr.len >= sizeof(struct ip6_hdr)) *m = m_pullup(*m, sizeof(struct ip6_hdr)); else if ((*m)->m_pkthdr.len >= sizeof(struct ip)) *m = m_pullup(*m, sizeof(struct ip)); else return (EAFNOSUPPORT); if (*m == NULL) return (ENOBUFS); ipv = mtod(*m, struct ip *)->ip_v; if (ipv == 4) *af = AF_INET; else if (ipv == 6 && (*m)->m_pkthdr.len >= sizeof(struct ip6_hdr)) *af = AF_INET6; else return (EAFNOSUPPORT); return (0); } static int wg_transmit(if_t ifp, struct mbuf *m) { sa_family_t af; int ret; struct mbuf *defragged; defragged = m_defrag(m, M_NOWAIT); if (defragged) m = defragged; m = m_unshare(m, M_NOWAIT); if (!m) { xmit_err(ifp, m, NULL, AF_UNSPEC); return (ENOBUFS); } ret = determine_af_and_pullup(&m, &af); if (ret) { xmit_err(ifp, m, NULL, AF_UNSPEC); return (ret); } return (wg_xmit(ifp, m, af, if_getmtu(ifp))); } static int wg_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { sa_family_t parsed_af; uint32_t af, mtu; int ret; struct mbuf *defragged; if (dst->sa_family == AF_UNSPEC) memcpy(&af, dst->sa_data, sizeof(af)); else af = dst->sa_family; if (af == AF_UNSPEC) { xmit_err(ifp, m, NULL, af); return (EAFNOSUPPORT); } defragged = m_defrag(m, M_NOWAIT); if (defragged) m = defragged; m = m_unshare(m, M_NOWAIT); if (!m) { xmit_err(ifp, m, NULL, AF_UNSPEC); return (ENOBUFS); } ret = determine_af_and_pullup(&m, &parsed_af); if (ret) { xmit_err(ifp, m, NULL, AF_UNSPEC); return (ret); } if (parsed_af != af) { xmit_err(ifp, m, NULL, AF_UNSPEC); return (EAFNOSUPPORT); } mtu = (ro != NULL && ro->ro_mtu > 0) ? ro->ro_mtu : if_getmtu(ifp); return (wg_xmit(ifp, m, parsed_af, mtu)); } static int wg_peer_add(struct wg_softc *sc, const nvlist_t *nvl) { uint8_t public[WG_KEY_SIZE]; const void *pub_key, *preshared_key = NULL; const struct sockaddr *endpoint; int err; size_t size; struct noise_remote *remote; struct wg_peer *peer = NULL; bool need_insert = false; sx_assert(&sc->sc_lock, SX_XLOCKED); if (!nvlist_exists_binary(nvl, "public-key")) { return (EINVAL); } pub_key = nvlist_get_binary(nvl, "public-key", &size); if (size != WG_KEY_SIZE) { return (EINVAL); } if (noise_local_keys(sc->sc_local, public, NULL) == 0 && bcmp(public, pub_key, WG_KEY_SIZE) == 0) { return (0); // Silently ignored; not actually a failure. 
} if ((remote = noise_remote_lookup(sc->sc_local, pub_key)) != NULL) peer = noise_remote_arg(remote); if (nvlist_exists_bool(nvl, "remove") && nvlist_get_bool(nvl, "remove")) { if (remote != NULL) { wg_peer_destroy(peer); noise_remote_put(remote); } return (0); } if (nvlist_exists_bool(nvl, "replace-allowedips") && nvlist_get_bool(nvl, "replace-allowedips") && peer != NULL) { wg_aip_remove_all(sc, peer); } if (peer == NULL) { peer = wg_peer_alloc(sc, pub_key); need_insert = true; } if (nvlist_exists_binary(nvl, "endpoint")) { endpoint = nvlist_get_binary(nvl, "endpoint", &size); if (size > sizeof(peer->p_endpoint.e_remote)) { err = EINVAL; goto out; } memcpy(&peer->p_endpoint.e_remote, endpoint, size); } if (nvlist_exists_binary(nvl, "preshared-key")) { preshared_key = nvlist_get_binary(nvl, "preshared-key", &size); if (size != WG_KEY_SIZE) { err = EINVAL; goto out; } noise_remote_set_psk(peer->p_remote, preshared_key); } if (nvlist_exists_number(nvl, "persistent-keepalive-interval")) { uint64_t pki = nvlist_get_number(nvl, "persistent-keepalive-interval"); if (pki > UINT16_MAX) { err = EINVAL; goto out; } wg_timers_set_persistent_keepalive(peer, pki); } if (nvlist_exists_nvlist_array(nvl, "allowed-ips")) { const void *addr; uint64_t cidr; const nvlist_t * const * aipl; size_t allowedip_count; aipl = nvlist_get_nvlist_array(nvl, "allowed-ips", &allowedip_count); for (size_t idx = 0; idx < allowedip_count; idx++) { if (!nvlist_exists_number(aipl[idx], "cidr")) continue; cidr = nvlist_get_number(aipl[idx], "cidr"); if (nvlist_exists_binary(aipl[idx], "ipv4")) { addr = nvlist_get_binary(aipl[idx], "ipv4", &size); if (addr == NULL || cidr > 32 || size != sizeof(struct in_addr)) { err = EINVAL; goto out; } if ((err = wg_aip_add(sc, peer, AF_INET, addr, cidr)) != 0) goto out; } else if (nvlist_exists_binary(aipl[idx], "ipv6")) { addr = nvlist_get_binary(aipl[idx], "ipv6", &size); if (addr == NULL || cidr > 128 || size != sizeof(struct in6_addr)) { err = EINVAL; goto out; } if ((err = wg_aip_add(sc, peer, AF_INET6, addr, cidr)) != 0) goto out; } else { continue; } } } if (need_insert) { if ((err = noise_remote_enable(peer->p_remote)) != 0) goto out; TAILQ_INSERT_TAIL(&sc->sc_peers, peer, p_entry); sc->sc_peers_num++; if (if_getlinkstate(sc->sc_ifp) == LINK_STATE_UP) wg_timers_enable(peer); } if (remote != NULL) noise_remote_put(remote); return (0); out: if (need_insert) /* If we fail, only destroy if it was new. */ wg_peer_destroy(peer); if (remote != NULL) noise_remote_put(remote); return (err); } static int wgc_set(struct wg_softc *sc, struct wg_data_io *wgd) { uint8_t public[WG_KEY_SIZE], private[WG_KEY_SIZE]; if_t ifp; void *nvlpacked; nvlist_t *nvl; ssize_t size; int err; ifp = sc->sc_ifp; if (wgd->wgd_size == 0 || wgd->wgd_data == NULL) return (EFAULT); /* Can nvlists be streamed in? It's not nice to impose arbitrary limits like that but * there needs to be _some_ limitation. 
*/ if (wgd->wgd_size >= UINT32_MAX / 2) return (E2BIG); nvlpacked = malloc(wgd->wgd_size, M_TEMP, M_WAITOK | M_ZERO); err = copyin(wgd->wgd_data, nvlpacked, wgd->wgd_size); if (err) goto out; nvl = nvlist_unpack(nvlpacked, wgd->wgd_size, 0); if (nvl == NULL) { err = EBADMSG; goto out; } sx_xlock(&sc->sc_lock); if (nvlist_exists_bool(nvl, "replace-peers") && nvlist_get_bool(nvl, "replace-peers")) wg_peer_destroy_all(sc); if (nvlist_exists_number(nvl, "listen-port")) { uint64_t new_port = nvlist_get_number(nvl, "listen-port"); if (new_port > UINT16_MAX) { err = EINVAL; goto out_locked; } if (new_port != sc->sc_socket.so_port) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { if ((err = wg_socket_init(sc, new_port)) != 0) goto out_locked; } else sc->sc_socket.so_port = new_port; } } if (nvlist_exists_binary(nvl, "private-key")) { const void *key = nvlist_get_binary(nvl, "private-key", &size); if (size != WG_KEY_SIZE) { err = EINVAL; goto out_locked; } if (noise_local_keys(sc->sc_local, NULL, private) != 0 || timingsafe_bcmp(private, key, WG_KEY_SIZE) != 0) { struct wg_peer *peer; if (curve25519_generate_public(public, key)) { /* Peer conflict: remove conflicting peer. */ struct noise_remote *remote; if ((remote = noise_remote_lookup(sc->sc_local, public)) != NULL) { peer = noise_remote_arg(remote); wg_peer_destroy(peer); noise_remote_put(remote); } } /* * Set the private key and invalidate all existing * handshakes. */ /* Note: we might be removing the private key. */ noise_local_private(sc->sc_local, key); if (noise_local_keys(sc->sc_local, NULL, NULL) == 0) cookie_checker_update(&sc->sc_cookie, public); else cookie_checker_update(&sc->sc_cookie, NULL); } } if (nvlist_exists_number(nvl, "user-cookie")) { uint64_t user_cookie = nvlist_get_number(nvl, "user-cookie"); if (user_cookie > UINT32_MAX) { err = EINVAL; goto out_locked; } err = wg_socket_set_cookie(sc, user_cookie); if (err) goto out_locked; } if (nvlist_exists_nvlist_array(nvl, "peers")) { size_t peercount; const nvlist_t * const*nvl_peers; nvl_peers = nvlist_get_nvlist_array(nvl, "peers", &peercount); for (int i = 0; i < peercount; i++) { err = wg_peer_add(sc, nvl_peers[i]); if (err != 0) goto out_locked; } } out_locked: sx_xunlock(&sc->sc_lock); nvlist_destroy(nvl); out: zfree(nvlpacked, M_TEMP); return (err); } static int wgc_get(struct wg_softc *sc, struct wg_data_io *wgd) { uint8_t public_key[WG_KEY_SIZE] = { 0 }; uint8_t private_key[WG_KEY_SIZE] = { 0 }; uint8_t preshared_key[NOISE_SYMMETRIC_KEY_LEN] = { 0 }; nvlist_t *nvl, *nvl_peer, *nvl_aip, **nvl_peers, **nvl_aips; size_t size, peer_count, aip_count, i, j; struct wg_timespec64 ts64; struct wg_peer *peer; struct wg_aip *aip; void *packed; int err = 0; nvl = nvlist_create(0); if (!nvl) return (ENOMEM); sx_slock(&sc->sc_lock); if (sc->sc_socket.so_port != 0) nvlist_add_number(nvl, "listen-port", sc->sc_socket.so_port); if (sc->sc_socket.so_user_cookie != 0) nvlist_add_number(nvl, "user-cookie", sc->sc_socket.so_user_cookie); if (noise_local_keys(sc->sc_local, public_key, private_key) == 0) { nvlist_add_binary(nvl, "public-key", public_key, WG_KEY_SIZE); if (wgc_privileged(sc)) nvlist_add_binary(nvl, "private-key", private_key, WG_KEY_SIZE); explicit_bzero(private_key, sizeof(private_key)); } peer_count = sc->sc_peers_num; if (peer_count) { nvl_peers = mallocarray(peer_count, sizeof(void *), M_NVLIST, M_WAITOK | M_ZERO); i = 0; TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) { if (i >= peer_count) panic("peers changed from under us"); nvl_peers[i++] = nvl_peer = 
nvlist_create(0); if (!nvl_peer) { err = ENOMEM; goto err_peer; } (void)noise_remote_keys(peer->p_remote, public_key, preshared_key); nvlist_add_binary(nvl_peer, "public-key", public_key, sizeof(public_key)); if (wgc_privileged(sc)) nvlist_add_binary(nvl_peer, "preshared-key", preshared_key, sizeof(preshared_key)); explicit_bzero(preshared_key, sizeof(preshared_key)); if (peer->p_endpoint.e_remote.r_sa.sa_family == AF_INET) nvlist_add_binary(nvl_peer, "endpoint", &peer->p_endpoint.e_remote, sizeof(struct sockaddr_in)); else if (peer->p_endpoint.e_remote.r_sa.sa_family == AF_INET6) nvlist_add_binary(nvl_peer, "endpoint", &peer->p_endpoint.e_remote, sizeof(struct sockaddr_in6)); wg_timers_get_last_handshake(peer, &ts64); nvlist_add_binary(nvl_peer, "last-handshake-time", &ts64, sizeof(ts64)); nvlist_add_number(nvl_peer, "persistent-keepalive-interval", peer->p_persistent_keepalive_interval); nvlist_add_number(nvl_peer, "rx-bytes", counter_u64_fetch(peer->p_rx_bytes)); nvlist_add_number(nvl_peer, "tx-bytes", counter_u64_fetch(peer->p_tx_bytes)); aip_count = peer->p_aips_num; if (aip_count) { nvl_aips = mallocarray(aip_count, sizeof(void *), M_NVLIST, M_WAITOK | M_ZERO); j = 0; LIST_FOREACH(aip, &peer->p_aips, a_entry) { if (j >= aip_count) panic("aips changed from under us"); nvl_aips[j++] = nvl_aip = nvlist_create(0); if (!nvl_aip) { err = ENOMEM; goto err_aip; } if (aip->a_af == AF_INET) { nvlist_add_binary(nvl_aip, "ipv4", &aip->a_addr.in, sizeof(aip->a_addr.in)); nvlist_add_number(nvl_aip, "cidr", bitcount32(aip->a_mask.ip)); } #ifdef INET6 else if (aip->a_af == AF_INET6) { nvlist_add_binary(nvl_aip, "ipv6", &aip->a_addr.in6, sizeof(aip->a_addr.in6)); nvlist_add_number(nvl_aip, "cidr", in6_mask2len(&aip->a_mask.in6, NULL)); } #endif } nvlist_add_nvlist_array(nvl_peer, "allowed-ips", (const nvlist_t *const *)nvl_aips, aip_count); err_aip: for (j = 0; j < aip_count; ++j) nvlist_destroy(nvl_aips[j]); free(nvl_aips, M_NVLIST); if (err) goto err_peer; } } nvlist_add_nvlist_array(nvl, "peers", (const nvlist_t * const *)nvl_peers, peer_count); err_peer: for (i = 0; i < peer_count; ++i) nvlist_destroy(nvl_peers[i]); free(nvl_peers, M_NVLIST); if (err) { sx_sunlock(&sc->sc_lock); goto err; } } sx_sunlock(&sc->sc_lock); packed = nvlist_pack(nvl, &size); if (!packed) { err = ENOMEM; goto err; } if (!wgd->wgd_size) { wgd->wgd_size = size; goto out; } if (wgd->wgd_size < size) { err = ENOSPC; goto out; } err = copyout(packed, wgd->wgd_data, size); wgd->wgd_size = size; out: zfree(packed, M_NVLIST); err: nvlist_destroy(nvl); return (err); } static int wg_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct wg_data_io *wgd = (struct wg_data_io *)data; struct ifreq *ifr = (struct ifreq *)data; struct wg_softc *sc; int ret = 0; sx_slock(&wg_sx); sc = if_getsoftc(ifp); if (!sc) { ret = ENXIO; goto out; } switch (cmd) { case SIOCSWG: ret = priv_check(curthread, PRIV_NET_WG); if (ret == 0) ret = wgc_set(sc, wgd); break; case SIOCGWG: ret = wgc_get(sc, wgd); break; /* Interface IOCTLs */ case SIOCSIFADDR: /* * This differs from *BSD norms, but is more uniform with how * WireGuard behaves elsewhere. 
*/ break; case SIOCSIFFLAGS: if (if_getflags(ifp) & IFF_UP) ret = wg_up(sc); else wg_down(sc); break; case SIOCSIFMTU: if (ifr->ifr_mtu <= 0 || ifr->ifr_mtu > MAX_MTU) ret = EINVAL; else if_setmtu(ifp, ifr->ifr_mtu); break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCGTUNFIB: ifr->ifr_fib = sc->sc_socket.so_fibnum; break; case SIOCSTUNFIB: ret = priv_check(curthread, PRIV_NET_WG); if (ret) break; ret = priv_check(curthread, PRIV_NET_SETIFFIB); if (ret) break; sx_xlock(&sc->sc_lock); ret = wg_socket_set_fibnum(sc, ifr->ifr_fib); sx_xunlock(&sc->sc_lock); break; default: ret = ENOTTY; } out: sx_sunlock(&wg_sx); return (ret); } static int wg_up(struct wg_softc *sc) { if_t ifp = sc->sc_ifp; struct wg_peer *peer; int rc = EBUSY; sx_xlock(&sc->sc_lock); /* Jail's being removed, no more wg_up(). */ if ((sc->sc_flags & WGF_DYING) != 0) goto out; /* Silent success if we're already running. */ rc = 0; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) goto out; if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); rc = wg_socket_init(sc, sc->sc_socket.so_port); if (rc == 0) { TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) wg_timers_enable(peer); if_link_state_change(sc->sc_ifp, LINK_STATE_UP); } else { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); DPRINTF(sc, "Unable to initialize sockets: %d\n", rc); } out: sx_xunlock(&sc->sc_lock); return (rc); } static void wg_down(struct wg_softc *sc) { if_t ifp = sc->sc_ifp; struct wg_peer *peer; sx_xlock(&sc->sc_lock); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { sx_xunlock(&sc->sc_lock); return; } if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) { wg_queue_purge(&peer->p_stage_queue); wg_timers_disable(peer); } wg_queue_purge(&sc->sc_handshake_queue); TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) { noise_remote_handshake_clear(peer->p_remote); noise_remote_keypairs_clear(peer->p_remote); } if_link_state_change(sc->sc_ifp, LINK_STATE_DOWN); wg_socket_uninit(sc); sx_xunlock(&sc->sc_lock); } static int wg_clone_create(struct if_clone *ifc, char *name, size_t len, struct ifc_data *ifd, struct ifnet **ifpp) { struct wg_softc *sc; if_t ifp; sc = malloc(sizeof(*sc), M_WG, M_WAITOK | M_ZERO); sc->sc_local = noise_local_alloc(sc); sc->sc_encrypt = mallocarray(sizeof(struct grouptask), mp_ncpus, M_WG, M_WAITOK | M_ZERO); sc->sc_decrypt = mallocarray(sizeof(struct grouptask), mp_ncpus, M_WG, M_WAITOK | M_ZERO); if (!rn_inithead((void **)&sc->sc_aip4, offsetof(struct aip_addr, in) * NBBY)) goto free_decrypt; if (!rn_inithead((void **)&sc->sc_aip6, offsetof(struct aip_addr, in6) * NBBY)) goto free_aip4; atomic_add_int(&clone_count, 1); ifp = sc->sc_ifp = if_alloc(IFT_WIREGUARD); sc->sc_ucred = crhold(curthread->td_ucred); sc->sc_socket.so_fibnum = curthread->td_proc->p_fibnum; sc->sc_socket.so_port = 0; TAILQ_INIT(&sc->sc_peers); sc->sc_peers_num = 0; cookie_checker_init(&sc->sc_cookie); RADIX_NODE_HEAD_LOCK_INIT(sc->sc_aip4); RADIX_NODE_HEAD_LOCK_INIT(sc->sc_aip6); GROUPTASK_INIT(&sc->sc_handshake, 0, (gtask_fn_t *)wg_softc_handshake_receive, sc); taskqgroup_attach(qgroup_wg_tqg, &sc->sc_handshake, sc, NULL, NULL, "wg tx initiation"); wg_queue_init(&sc->sc_handshake_queue, "hsq"); for (int i = 0; i < mp_ncpus; i++) { GROUPTASK_INIT(&sc->sc_encrypt[i], 0, (gtask_fn_t *)wg_softc_encrypt, sc); taskqgroup_attach_cpu(qgroup_wg_tqg, &sc->sc_encrypt[i], sc, i, NULL, NULL, "wg encrypt"); GROUPTASK_INIT(&sc->sc_decrypt[i], 0, (gtask_fn_t *)wg_softc_decrypt, sc); taskqgroup_attach_cpu(qgroup_wg_tqg, &sc->sc_decrypt[i], sc, i, NULL, NULL, "wg decrypt"); } 
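/* Descriptive note (added for clarity, not part of the original patch): the per-CPU encrypt/decrypt grouptasks are now attached; what follows initializes the shared parallel queues and the softc lock, then fills in the ifnet methods before the interface is attached and becomes visible. */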
wg_queue_init(&sc->sc_encrypt_parallel, "encp"); wg_queue_init(&sc->sc_decrypt_parallel, "decp"); sx_init(&sc->sc_lock, "wg softc lock"); if_setsoftc(ifp, sc); if_setcapabilities(ifp, WG_CAPS); if_setcapenable(ifp, WG_CAPS); if_initname(ifp, wgname, ifd->unit); if_setmtu(ifp, DEFAULT_MTU); if_setflags(ifp, IFF_NOARP | IFF_MULTICAST); if_setinitfn(ifp, wg_init); if_setreassignfn(ifp, wg_reassign); if_setqflushfn(ifp, wg_qflush); if_settransmitfn(ifp, wg_transmit); if_setoutputfn(ifp, wg_output); if_setioctlfn(ifp, wg_ioctl); if_attach(ifp); bpfattach(ifp, DLT_NULL, sizeof(uint32_t)); #ifdef INET6 ND_IFINFO(ifp)->flags &= ~ND6_IFF_AUTO_LINKLOCAL; ND_IFINFO(ifp)->flags |= ND6_IFF_NO_DAD; #endif sx_xlock(&wg_sx); LIST_INSERT_HEAD(&wg_list, sc, sc_entry); sx_xunlock(&wg_sx); *ifpp = ifp; return (0); free_aip4: RADIX_NODE_HEAD_DESTROY(sc->sc_aip4); free(sc->sc_aip4, M_RTABLE); free_decrypt: free(sc->sc_decrypt, M_WG); free(sc->sc_encrypt, M_WG); noise_local_free(sc->sc_local, NULL); free(sc, M_WG); return (ENOMEM); } static void wg_clone_deferred_free(struct noise_local *l) { struct wg_softc *sc = noise_local_arg(l); free(sc, M_WG); atomic_add_int(&clone_count, -1); } static int wg_clone_destroy(struct if_clone *ifc, if_t ifp, uint32_t flags) { struct wg_softc *sc = if_getsoftc(ifp); struct ucred *cred; sx_xlock(&wg_sx); if_setsoftc(ifp, NULL); sx_xlock(&sc->sc_lock); sc->sc_flags |= WGF_DYING; cred = sc->sc_ucred; sc->sc_ucred = NULL; sx_xunlock(&sc->sc_lock); LIST_REMOVE(sc, sc_entry); sx_xunlock(&wg_sx); if_link_state_change(sc->sc_ifp, LINK_STATE_DOWN); CURVNET_SET(if_getvnet(sc->sc_ifp)); if_purgeaddrs(sc->sc_ifp); CURVNET_RESTORE(); sx_xlock(&sc->sc_lock); wg_socket_uninit(sc); sx_xunlock(&sc->sc_lock); /* * No guarantees that all traffic have passed until the epoch has * elapsed with the socket closed. */ NET_EPOCH_WAIT(); taskqgroup_drain_all(qgroup_wg_tqg); sx_xlock(&sc->sc_lock); wg_peer_destroy_all(sc); NET_EPOCH_DRAIN_CALLBACKS(); sx_xunlock(&sc->sc_lock); sx_destroy(&sc->sc_lock); taskqgroup_detach(qgroup_wg_tqg, &sc->sc_handshake); for (int i = 0; i < mp_ncpus; i++) { taskqgroup_detach(qgroup_wg_tqg, &sc->sc_encrypt[i]); taskqgroup_detach(qgroup_wg_tqg, &sc->sc_decrypt[i]); } free(sc->sc_encrypt, M_WG); free(sc->sc_decrypt, M_WG); wg_queue_deinit(&sc->sc_handshake_queue); wg_queue_deinit(&sc->sc_encrypt_parallel); wg_queue_deinit(&sc->sc_decrypt_parallel); RADIX_NODE_HEAD_DESTROY(sc->sc_aip4); RADIX_NODE_HEAD_DESTROY(sc->sc_aip6); rn_detachhead((void **)&sc->sc_aip4); rn_detachhead((void **)&sc->sc_aip6); cookie_checker_free(&sc->sc_cookie); if (cred != NULL) crfree(cred); if_detach(sc->sc_ifp); if_free(sc->sc_ifp); noise_local_free(sc->sc_local, wg_clone_deferred_free); return (0); } static void wg_qflush(if_t ifp __unused) { } /* * Privileged information (private-key, preshared-key) are only exported for * root and jailed root by default. 
*/ static bool wgc_privileged(struct wg_softc *sc) { struct thread *td; td = curthread; return (priv_check(td, PRIV_NET_WG) == 0); } static void wg_reassign(if_t ifp, struct vnet *new_vnet __unused, char *unused __unused) { struct wg_softc *sc; sc = if_getsoftc(ifp); wg_down(sc); } static void wg_init(void *xsc) { struct wg_softc *sc; sc = xsc; wg_up(sc); } static void vnet_wg_init(const void *unused __unused) { struct if_clone_addreq req = { .create_f = wg_clone_create, .destroy_f = wg_clone_destroy, .flags = IFC_F_AUTOUNIT, }; V_wg_cloner = ifc_attach_cloner(wgname, &req); } VNET_SYSINIT(vnet_wg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, vnet_wg_init, NULL); static void vnet_wg_uninit(const void *unused __unused) { if (V_wg_cloner) ifc_detach_cloner(V_wg_cloner); } VNET_SYSUNINIT(vnet_wg_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, vnet_wg_uninit, NULL); static int wg_prison_remove(void *obj, void *data __unused) { const struct prison *pr = obj; struct wg_softc *sc; /* * Do a pass through all if_wg interfaces and release creds on any from * the jail that are supposed to be going away. This will, in turn, let * the jail die so that we don't end up with Schrödinger's jail. */ sx_slock(&wg_sx); LIST_FOREACH(sc, &wg_list, sc_entry) { sx_xlock(&sc->sc_lock); if (!(sc->sc_flags & WGF_DYING) && sc->sc_ucred && sc->sc_ucred->cr_prison == pr) { struct ucred *cred = sc->sc_ucred; DPRINTF(sc, "Creating jail exiting\n"); if_link_state_change(sc->sc_ifp, LINK_STATE_DOWN); wg_socket_uninit(sc); sc->sc_ucred = NULL; crfree(cred); sc->sc_flags |= WGF_DYING; } sx_xunlock(&sc->sc_lock); } sx_sunlock(&wg_sx); return (0); } #ifdef SELFTESTS #include "selftest/allowedips.c" static bool wg_run_selftests(void) { bool ret = true; ret &= wg_allowedips_selftest(); ret &= noise_counter_selftest(); ret &= cookie_selftest(); return ret; } #else static inline bool wg_run_selftests(void) { return true; } #endif static int wg_module_init(void) { int ret; osd_method_t methods[PR_MAXMETHOD] = { [PR_METHOD_REMOVE] = wg_prison_remove, }; if ((wg_packet_zone = uma_zcreate("wg packet", sizeof(struct wg_packet), NULL, NULL, NULL, NULL, 0, 0)) == NULL) return (ENOMEM); ret = crypto_init(); if (ret != 0) return (ret); ret = cookie_init(); if (ret != 0) return (ret); wg_osd_jail_slot = osd_jail_register(NULL, methods); if (!wg_run_selftests()) return (ENOTRECOVERABLE); return (0); } static void wg_module_deinit(void) { VNET_ITERATOR_DECL(vnet_iter); VNET_LIST_RLOCK(); VNET_FOREACH(vnet_iter) { struct if_clone *clone = VNET_VNET(vnet_iter, wg_cloner); if (clone) { ifc_detach_cloner(clone); VNET_VNET(vnet_iter, wg_cloner) = NULL; } } VNET_LIST_RUNLOCK(); NET_EPOCH_WAIT(); MPASS(LIST_EMPTY(&wg_list)); if (wg_osd_jail_slot != 0) osd_jail_deregister(wg_osd_jail_slot); cookie_deinit(); crypto_deinit(); if (wg_packet_zone != NULL) uma_zdestroy(wg_packet_zone); } static int wg_module_event_handler(module_t mod, int what, void *arg) { switch (what) { case MOD_LOAD: return wg_module_init(); case MOD_UNLOAD: wg_module_deinit(); break; default: return (EOPNOTSUPP); } return (0); } static moduledata_t wg_moduledata = { "if_wg", wg_module_event_handler, NULL }; DECLARE_MODULE(if_wg, wg_moduledata, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(if_wg, WIREGUARD_VERSION); MODULE_DEPEND(if_wg, crypto, 1, 1, 1); diff --git a/sys/kern/sys_socket.c b/sys/kern/sys_socket.c index 7169805a0d3f..314576281d6e 100644 --- a/sys/kern/sys_socket.c +++ b/sys/kern/sys_socket.c @@ -1,836 +1,835 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * 
Copyright (c) 1982, 1986, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_kern_ipc, OID_AUTO, aio, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "socket AIO stats"); static int empty_results; SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_results, CTLFLAG_RD, &empty_results, 0, "socket operation returned EAGAIN"); static int empty_retries; SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_retries, CTLFLAG_RD, &empty_retries, 0, "socket operation retries"); static fo_rdwr_t soo_read; static fo_rdwr_t soo_write; static fo_ioctl_t soo_ioctl; static fo_poll_t soo_poll; extern fo_kqfilter_t soo_kqfilter; static fo_stat_t soo_stat; static fo_close_t soo_close; static fo_fill_kinfo_t soo_fill_kinfo; static fo_aio_queue_t soo_aio_queue; static void soo_aio_cancel(struct kaiocb *job); struct fileops socketops = { .fo_read = soo_read, .fo_write = soo_write, .fo_truncate = invfo_truncate, .fo_ioctl = soo_ioctl, .fo_poll = soo_poll, .fo_kqfilter = soo_kqfilter, .fo_stat = soo_stat, .fo_close = soo_close, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_fill_kinfo = soo_fill_kinfo, .fo_aio_queue = soo_aio_queue, .fo_flags = DFLAG_PASSABLE }; static int soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct socket *so = fp->f_data; int error; #ifdef MAC error = mac_socket_check_receive(active_cred, so); if (error) return (error); #endif error = soreceive(so, 0, uio, 0, 0, 0); return (error); } static int soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct socket *so = fp->f_data; int error; #ifdef MAC error = mac_socket_check_send(active_cred, so); if (error) 
return (error); #endif error = sousrsend(so, NULL, uio, NULL, 0, NULL); return (error); } static int soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred, struct thread *td) { struct socket *so = fp->f_data; int error = 0; switch (cmd) { case FIONBIO: SOCK_LOCK(so); if (*(int *)data) so->so_state |= SS_NBIO; else so->so_state &= ~SS_NBIO; SOCK_UNLOCK(so); break; case FIOASYNC: if (*(int *)data) { SOCK_LOCK(so); so->so_state |= SS_ASYNC; if (SOLISTENING(so)) { so->sol_sbrcv_flags |= SB_ASYNC; so->sol_sbsnd_flags |= SB_ASYNC; } else { SOCK_RECVBUF_LOCK(so); so->so_rcv.sb_flags |= SB_ASYNC; SOCK_RECVBUF_UNLOCK(so); SOCK_SENDBUF_LOCK(so); so->so_snd.sb_flags |= SB_ASYNC; SOCK_SENDBUF_UNLOCK(so); } SOCK_UNLOCK(so); } else { SOCK_LOCK(so); so->so_state &= ~SS_ASYNC; if (SOLISTENING(so)) { so->sol_sbrcv_flags &= ~SB_ASYNC; so->sol_sbsnd_flags &= ~SB_ASYNC; } else { SOCK_RECVBUF_LOCK(so); so->so_rcv.sb_flags &= ~SB_ASYNC; SOCK_RECVBUF_UNLOCK(so); SOCK_SENDBUF_LOCK(so); so->so_snd.sb_flags &= ~SB_ASYNC; SOCK_SENDBUF_UNLOCK(so); } SOCK_UNLOCK(so); } break; case FIONREAD: SOCK_RECVBUF_LOCK(so); if (SOLISTENING(so)) { error = EINVAL; } else { *(int *)data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl; } SOCK_RECVBUF_UNLOCK(so); break; case FIONWRITE: /* Unlocked read. */ if (SOLISTENING(so)) { error = EINVAL; } else { *(int *)data = sbavail(&so->so_snd); } break; case FIONSPACE: /* Unlocked read. */ if (SOLISTENING(so)) { error = EINVAL; } else { if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) || (so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt)) { *(int *)data = 0; } else { *(int *)data = sbspace(&so->so_snd); } } break; case FIOSETOWN: error = fsetown(*(int *)data, &so->so_sigio); break; case FIOGETOWN: *(int *)data = fgetown(&so->so_sigio); break; case SIOCSPGRP: error = fsetown(-(*(int *)data), &so->so_sigio); break; case SIOCGPGRP: *(int *)data = -fgetown(&so->so_sigio); break; case SIOCATMARK: /* Unlocked read. */ if (SOLISTENING(so)) { error = EINVAL; } else { *(int *)data = (so->so_rcv.sb_state & SBS_RCVATMARK) != 0; } break; default: /* * Interface/routing/protocol specific ioctls: interface and * routing ioctls should have a different entry since a * socket is unnecessary. */ if (IOCGROUP(cmd) == 'i') error = ifioctl(so, cmd, data, td); else if (IOCGROUP(cmd) == 'r') { CURVNET_SET(so->so_vnet); error = rtioctl_fib(cmd, data, so->so_fibnum); CURVNET_RESTORE(); } else { CURVNET_SET(so->so_vnet); error = so->so_proto->pr_control(so, cmd, data, 0, td); CURVNET_RESTORE(); } break; } return (error); } static int soo_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td) { struct socket *so = fp->f_data; #ifdef MAC int error; error = mac_socket_check_poll(active_cred, so); if (error) return (error); #endif return (sopoll(so, events, fp->f_cred, td)); } static int soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred) { struct socket *so = fp->f_data; int error = 0; bzero((caddr_t)ub, sizeof (*ub)); ub->st_mode = S_IFSOCK; #ifdef MAC error = mac_socket_check_stat(active_cred, so); if (error) return (error); #endif SOCK_LOCK(so); if (!SOLISTENING(so)) { struct sockbuf *sb; /* * If SBS_CANTRCVMORE is set, but there's still data left * in the receive buffer, the socket is still readable. 
*/ sb = &so->so_rcv; SOCK_RECVBUF_LOCK(so); if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb)) ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH; ub->st_size = sbavail(sb) - sb->sb_ctl; SOCK_RECVBUF_UNLOCK(so); sb = &so->so_snd; SOCK_SENDBUF_LOCK(so); if ((sb->sb_state & SBS_CANTSENDMORE) == 0) ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH; SOCK_SENDBUF_UNLOCK(so); } ub->st_uid = so->so_cred->cr_uid; ub->st_gid = so->so_cred->cr_gid; if (so->so_proto->pr_sense) error = so->so_proto->pr_sense(so, ub); SOCK_UNLOCK(so); return (error); } /* * API socket close on file pointer. We call soclose() to close the socket * (including initiating closing protocols). soclose() will sorele() the * file reference but the actual socket will not go away until the socket's * ref count hits 0. */ static int soo_close(struct file *fp, struct thread *td) { int error = 0; struct socket *so; so = fp->f_data; fp->f_ops = &badfileops; fp->f_data = NULL; if (so) error = soclose(so); return (error); } static int soo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { - struct sockaddr *sa; + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; struct inpcb *inpcb; struct unpcb *unpcb; struct socket *so; int error; kif->kf_type = KF_TYPE_SOCKET; so = fp->f_data; CURVNET_SET(so->so_vnet); kif->kf_un.kf_sock.kf_sock_domain0 = so->so_proto->pr_domain->dom_family; kif->kf_un.kf_sock.kf_sock_type0 = so->so_type; kif->kf_un.kf_sock.kf_sock_protocol0 = so->so_proto->pr_protocol; kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb; switch (kif->kf_un.kf_sock.kf_sock_domain0) { case AF_INET: case AF_INET6: if (so->so_pcb != NULL) { inpcb = (struct inpcb *)(so->so_pcb); kif->kf_un.kf_sock.kf_sock_inpcb = (uintptr_t)inpcb->inp_ppcb; } kif->kf_un.kf_sock.kf_sock_rcv_sb_state = so->so_rcv.sb_state; kif->kf_un.kf_sock.kf_sock_snd_sb_state = so->so_snd.sb_state; kif->kf_un.kf_sock.kf_sock_sendq = sbused(&so->so_snd); kif->kf_un.kf_sock.kf_sock_recvq = sbused(&so->so_rcv); break; case AF_UNIX: if (so->so_pcb != NULL) { unpcb = (struct unpcb *)(so->so_pcb); if (unpcb->unp_conn) { kif->kf_un.kf_sock.kf_sock_unpconn = (uintptr_t)unpcb->unp_conn; kif->kf_un.kf_sock.kf_sock_rcv_sb_state = so->so_rcv.sb_state; kif->kf_un.kf_sock.kf_sock_snd_sb_state = so->so_snd.sb_state; kif->kf_un.kf_sock.kf_sock_sendq = sbused(&so->so_snd); kif->kf_un.kf_sock.kf_sock_recvq = sbused(&so->so_rcv); } } break; } - error = so->so_proto->pr_sockaddr(so, &sa); + error = sosockaddr(so, (struct sockaddr *)&ss); if (error == 0 && - sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_local)) { - bcopy(sa, &kif->kf_un.kf_sock.kf_sa_local, sa->sa_len); - free(sa, M_SONAME); + ss.ss_len <= sizeof(kif->kf_un.kf_sock.kf_sa_local)) { + bcopy(&ss, &kif->kf_un.kf_sock.kf_sa_local, ss.ss_len); } - error = so->so_proto->pr_peeraddr(so, &sa); + ss.ss_len = sizeof(ss); + error = sopeeraddr(so, (struct sockaddr *)&ss); if (error == 0 && - sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_peer)) { - bcopy(sa, &kif->kf_un.kf_sock.kf_sa_peer, sa->sa_len); - free(sa, M_SONAME); + ss.ss_len <= sizeof(kif->kf_un.kf_sock.kf_sa_peer)) { + bcopy(&ss, &kif->kf_un.kf_sock.kf_sa_peer, ss.ss_len); } strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name, sizeof(kif->kf_path)); CURVNET_RESTORE(); return (0); } /* * Use the 'backend3' field in AIO jobs to store the amount of data * completed by the AIO job so far. 
*/ #define aio_done backend3 static STAILQ_HEAD(, task) soaio_jobs; static struct mtx soaio_jobs_lock; static struct task soaio_kproc_task; static int soaio_starting, soaio_idle, soaio_queued; static struct unrhdr *soaio_kproc_unr; static int soaio_max_procs = MAX_AIO_PROCS; SYSCTL_INT(_kern_ipc_aio, OID_AUTO, max_procs, CTLFLAG_RW, &soaio_max_procs, 0, "Maximum number of kernel processes to use for async socket IO"); static int soaio_num_procs; SYSCTL_INT(_kern_ipc_aio, OID_AUTO, num_procs, CTLFLAG_RD, &soaio_num_procs, 0, "Number of active kernel processes for async socket IO"); static int soaio_target_procs = TARGET_AIO_PROCS; SYSCTL_INT(_kern_ipc_aio, OID_AUTO, target_procs, CTLFLAG_RD, &soaio_target_procs, 0, "Preferred number of ready kernel processes for async socket IO"); static int soaio_lifetime; SYSCTL_INT(_kern_ipc_aio, OID_AUTO, lifetime, CTLFLAG_RW, &soaio_lifetime, 0, "Maximum lifetime for idle aiod"); static void soaio_kproc_loop(void *arg) { struct proc *p; struct vmspace *myvm; struct task *task; int error, id, pending; id = (intptr_t)arg; /* * Grab an extra reference on the daemon's vmspace so that it * doesn't get freed by jobs that switch to a different * vmspace. */ p = curproc; myvm = vmspace_acquire_ref(p); mtx_lock(&soaio_jobs_lock); MPASS(soaio_starting > 0); soaio_starting--; for (;;) { while (!STAILQ_EMPTY(&soaio_jobs)) { task = STAILQ_FIRST(&soaio_jobs); STAILQ_REMOVE_HEAD(&soaio_jobs, ta_link); soaio_queued--; pending = task->ta_pending; task->ta_pending = 0; mtx_unlock(&soaio_jobs_lock); task->ta_func(task->ta_context, pending); mtx_lock(&soaio_jobs_lock); } MPASS(soaio_queued == 0); if (p->p_vmspace != myvm) { mtx_unlock(&soaio_jobs_lock); vmspace_switch_aio(myvm); mtx_lock(&soaio_jobs_lock); continue; } soaio_idle++; error = mtx_sleep(&soaio_idle, &soaio_jobs_lock, 0, "-", soaio_lifetime); soaio_idle--; if (error == EWOULDBLOCK && STAILQ_EMPTY(&soaio_jobs) && soaio_num_procs > soaio_target_procs) break; } soaio_num_procs--; mtx_unlock(&soaio_jobs_lock); free_unr(soaio_kproc_unr, id); kproc_exit(0); } static void soaio_kproc_create(void *context, int pending) { struct proc *p; int error, id; mtx_lock(&soaio_jobs_lock); for (;;) { if (soaio_num_procs < soaio_target_procs) { /* Must create */ } else if (soaio_num_procs >= soaio_max_procs) { /* * Hit the limit on kernel processes, don't * create another one. */ break; } else if (soaio_queued <= soaio_idle + soaio_starting) { /* * No more AIO jobs waiting for a process to be * created, so stop. 
*/ break; } soaio_starting++; mtx_unlock(&soaio_jobs_lock); id = alloc_unr(soaio_kproc_unr); error = kproc_create(soaio_kproc_loop, (void *)(intptr_t)id, &p, 0, 0, "soaiod%d", id); if (error != 0) { free_unr(soaio_kproc_unr, id); mtx_lock(&soaio_jobs_lock); soaio_starting--; break; } mtx_lock(&soaio_jobs_lock); soaio_num_procs++; } mtx_unlock(&soaio_jobs_lock); } void soaio_enqueue(struct task *task) { mtx_lock(&soaio_jobs_lock); MPASS(task->ta_pending == 0); task->ta_pending++; STAILQ_INSERT_TAIL(&soaio_jobs, task, ta_link); soaio_queued++; if (soaio_queued <= soaio_idle) wakeup_one(&soaio_idle); else if (soaio_num_procs < soaio_max_procs) taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task); mtx_unlock(&soaio_jobs_lock); } static void soaio_init(void) { soaio_lifetime = AIOD_LIFETIME_DEFAULT; STAILQ_INIT(&soaio_jobs); mtx_init(&soaio_jobs_lock, "soaio jobs", NULL, MTX_DEF); soaio_kproc_unr = new_unrhdr(1, INT_MAX, NULL); TASK_INIT(&soaio_kproc_task, 0, soaio_kproc_create, NULL); } SYSINIT(soaio, SI_SUB_VFS, SI_ORDER_ANY, soaio_init, NULL); static __inline int soaio_ready(struct socket *so, struct sockbuf *sb) { return (sb == &so->so_rcv ? soreadable(so) : sowriteable(so)); } static void soaio_process_job(struct socket *so, sb_which which, struct kaiocb *job) { struct ucred *td_savedcred; struct thread *td; struct sockbuf *sb = sobuf(so, which); #ifdef MAC struct file *fp = job->fd_file; #endif size_t cnt, done, job_total_nbytes __diagused; long ru_before; int error, flags; SOCK_BUF_UNLOCK(so, which); aio_switch_vmspace(job); td = curthread; retry: td_savedcred = td->td_ucred; td->td_ucred = job->cred; job_total_nbytes = job->uiop->uio_resid + job->aio_done; done = job->aio_done; cnt = job->uiop->uio_resid; job->uiop->uio_offset = 0; job->uiop->uio_td = td; flags = MSG_NBIO; /* * For resource usage accounting, only count a completed request * as a single message to avoid counting multiple calls to * sosend/soreceive on a blocking socket. */ if (sb == &so->so_rcv) { ru_before = td->td_ru.ru_msgrcv; #ifdef MAC error = mac_socket_check_receive(fp->f_cred, so); if (error == 0) #endif error = soreceive(so, NULL, job->uiop, NULL, NULL, &flags); if (td->td_ru.ru_msgrcv != ru_before) job->msgrcv = 1; } else { if (!TAILQ_EMPTY(&sb->sb_aiojobq)) flags |= MSG_MORETOCOME; ru_before = td->td_ru.ru_msgsnd; #ifdef MAC error = mac_socket_check_send(fp->f_cred, so); if (error == 0) #endif error = sousrsend(so, NULL, job->uiop, NULL, flags, job->userproc); if (td->td_ru.ru_msgsnd != ru_before) job->msgsnd = 1; } done += cnt - job->uiop->uio_resid; job->aio_done = done; td->td_ucred = td_savedcred; if (error == EWOULDBLOCK) { /* * The request was either partially completed or not * completed at all due to racing with a read() or * write() on the socket. If the socket is * non-blocking, return with any partial completion. * If the socket is blocking or if no progress has * been made, requeue this request at the head of the * queue to try again when the socket is ready. 
*/ MPASS(done != job_total_nbytes); SOCK_BUF_LOCK(so, which); if (done == 0 || !(so->so_state & SS_NBIO)) { empty_results++; if (soaio_ready(so, sb)) { empty_retries++; SOCK_BUF_UNLOCK(so, which); goto retry; } if (!aio_set_cancel_function(job, soo_aio_cancel)) { SOCK_BUF_UNLOCK(so, which); if (done != 0) aio_complete(job, done, 0); else aio_cancel(job); SOCK_BUF_LOCK(so, which); } else { TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list); } return; } SOCK_BUF_UNLOCK(so, which); } if (done != 0 && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) error = 0; if (error) aio_complete(job, -1, error); else aio_complete(job, done, 0); SOCK_BUF_LOCK(so, which); } static void soaio_process_sb(struct socket *so, sb_which which) { struct kaiocb *job; struct sockbuf *sb = sobuf(so, which); CURVNET_SET(so->so_vnet); SOCK_BUF_LOCK(so, which); while (!TAILQ_EMPTY(&sb->sb_aiojobq) && soaio_ready(so, sb)) { job = TAILQ_FIRST(&sb->sb_aiojobq); TAILQ_REMOVE(&sb->sb_aiojobq, job, list); if (!aio_clear_cancel_function(job)) continue; soaio_process_job(so, which, job); } /* * If there are still pending requests, the socket must not be * ready so set SB_AIO to request a wakeup when the socket * becomes ready. */ if (!TAILQ_EMPTY(&sb->sb_aiojobq)) sb->sb_flags |= SB_AIO; sb->sb_flags &= ~SB_AIO_RUNNING; SOCK_BUF_UNLOCK(so, which); sorele(so); CURVNET_RESTORE(); } void soaio_rcv(void *context, int pending) { struct socket *so; so = context; soaio_process_sb(so, SO_RCV); } void soaio_snd(void *context, int pending) { struct socket *so; so = context; soaio_process_sb(so, SO_SND); } void sowakeup_aio(struct socket *so, sb_which which) { struct sockbuf *sb = sobuf(so, which); SOCK_BUF_LOCK_ASSERT(so, which); sb->sb_flags &= ~SB_AIO; if (sb->sb_flags & SB_AIO_RUNNING) return; sb->sb_flags |= SB_AIO_RUNNING; soref(so); soaio_enqueue(&sb->sb_aiotask); } static void soo_aio_cancel(struct kaiocb *job) { struct socket *so; struct sockbuf *sb; long done; int opcode; sb_which which; so = job->fd_file->f_data; opcode = job->uaiocb.aio_lio_opcode; if (opcode & LIO_READ) { sb = &so->so_rcv; which = SO_RCV; } else { MPASS(opcode & LIO_WRITE); sb = &so->so_snd; which = SO_SND; } SOCK_BUF_LOCK(so, which); if (!aio_cancel_cleared(job)) TAILQ_REMOVE(&sb->sb_aiojobq, job, list); if (TAILQ_EMPTY(&sb->sb_aiojobq)) sb->sb_flags &= ~SB_AIO; SOCK_BUF_UNLOCK(so, which); done = job->aio_done; if (done != 0) aio_complete(job, done, 0); else aio_cancel(job); } static int soo_aio_queue(struct file *fp, struct kaiocb *job) { struct socket *so; struct sockbuf *sb; sb_which which; int error; so = fp->f_data; error = so->so_proto->pr_aio_queue(so, job); if (error == 0) return (0); /* Lock through the socket, since this may be a listening socket. 
*/ switch (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) { case LIO_READ: SOCK_RECVBUF_LOCK(so); sb = &so->so_rcv; which = SO_RCV; break; case LIO_WRITE: SOCK_SENDBUF_LOCK(so); sb = &so->so_snd; which = SO_SND; break; default: return (EINVAL); } if (SOLISTENING(so)) { SOCK_BUF_UNLOCK(so, which); return (EINVAL); } if (!aio_set_cancel_function(job, soo_aio_cancel)) panic("new job was cancelled"); TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list); if (!(sb->sb_flags & SB_AIO_RUNNING)) { if (soaio_ready(so, sb)) sowakeup_aio(so, which); else sb->sb_flags |= SB_AIO; } SOCK_BUF_UNLOCK(so, which); return (0); } diff --git a/sys/kern/uipc_domain.c b/sys/kern/uipc_domain.c index cf9b91511574..435b13842041 100644 --- a/sys/kern/uipc_domain.c +++ b/sys/kern/uipc_domain.c @@ -1,394 +1,394 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct domainhead domains = SLIST_HEAD_INITIALIZER(&domains); int domain_init_status = 1; static struct mtx dom_mtx; /* domain list lock */ MTX_SYSINIT(domain, &dom_mtx, "domain list", MTX_DEF); static int pr_accept_notsupp(struct socket *so, struct sockaddr *sa) { return (EOPNOTSUPP); } static int pr_aio_queue_notsupp(struct socket *so, struct kaiocb *job) { return (EOPNOTSUPP); } static int pr_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) { return (EOPNOTSUPP); } static int pr_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { return (EOPNOTSUPP); } static int pr_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) { return (EOPNOTSUPP); } static int pr_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { return (EOPNOTSUPP); } static int pr_connect2_notsupp(struct socket *so1, struct socket *so2) { return (EOPNOTSUPP); } static int pr_control_notsupp(struct socket *so, u_long cmd, void *data, struct ifnet *ifp, struct thread *td) { return (EOPNOTSUPP); } static int pr_disconnect_notsupp(struct socket *so) { return (EOPNOTSUPP); } static int pr_listen_notsupp(struct socket *so, int backlog, struct thread *td) { return (EOPNOTSUPP); } static int -pr_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) +pr_peeraddr_notsupp(struct socket *so, struct sockaddr *nam) { return (EOPNOTSUPP); } static int pr_rcvd_notsupp(struct socket *so, int flags) { return (EOPNOTSUPP); } static int pr_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) { return (EOPNOTSUPP); } static int pr_send_notsupp(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *td) { if (control != NULL) m_freem(control); if ((flags & PRUS_NOTREADY) == 0) m_freem(m); return (EOPNOTSUPP); } static int pr_ready_notsupp(struct socket *so, struct mbuf *m, int count) { return (EOPNOTSUPP); } static int pr_shutdown_notsupp(struct socket *so) { return (EOPNOTSUPP); } static int -pr_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) +pr_sockaddr_notsupp(struct socket *so, struct sockaddr *nam) { return (EOPNOTSUPP); } static int pr_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { return (EOPNOTSUPP); } static int pr_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { return (EOPNOTSUPP); } static int pr_sopoll_notsupp(struct socket *so, int events, struct ucred *cred, struct thread *td) { return (EOPNOTSUPP); } static void pr_init(struct domain *dom, struct protosw *pr) { KASSERT(pr->pr_attach != NULL, ("%s: protocol doesn't have pr_attach", __func__)); pr->pr_domain = dom; #define DEFAULT(foo, bar) if (pr->foo == NULL) pr->foo = bar DEFAULT(pr_sosend, sosend_generic); DEFAULT(pr_soreceive, soreceive_generic); DEFAULT(pr_sopoll, sopoll_generic); DEFAULT(pr_setsbopt, sbsetopt); #define NOTSUPP(foo) if (pr->foo == NULL) pr->foo = foo ## _notsupp NOTSUPP(pr_accept); NOTSUPP(pr_aio_queue); NOTSUPP(pr_bind); NOTSUPP(pr_bindat); NOTSUPP(pr_connect); NOTSUPP(pr_connect2); NOTSUPP(pr_connectat); NOTSUPP(pr_control); NOTSUPP(pr_disconnect); NOTSUPP(pr_listen); NOTSUPP(pr_peeraddr); NOTSUPP(pr_rcvd); NOTSUPP(pr_rcvoob); NOTSUPP(pr_send); 
NOTSUPP(pr_shutdown); NOTSUPP(pr_sockaddr); NOTSUPP(pr_sosend); NOTSUPP(pr_soreceive); NOTSUPP(pr_sopoll); NOTSUPP(pr_ready); } /* * Add a new protocol domain to the list of supported domains * Note: you cant unload it again because a socket may be using it. * XXX can't fail at this time. */ void domain_add(struct domain *dp) { struct protosw *pr; MPASS(IS_DEFAULT_VNET(curvnet)); if (dp->dom_probe != NULL && (*dp->dom_probe)() != 0) return; for (int i = 0; i < dp->dom_nprotosw; i++) if ((pr = dp->dom_protosw[i]) != NULL) pr_init(dp, pr); mtx_lock(&dom_mtx); #ifdef INVARIANTS struct domain *tmp; SLIST_FOREACH(tmp, &domains, dom_next) MPASS(tmp->dom_family != dp->dom_family); #endif SLIST_INSERT_HEAD(&domains, dp, dom_next); mtx_unlock(&dom_mtx); } void domain_remove(struct domain *dp) { if ((dp->dom_flags & DOMF_UNLOADABLE) == 0) return; mtx_lock(&dom_mtx); SLIST_REMOVE(&domains, dp, domain, dom_next); mtx_unlock(&dom_mtx); } static void domainfinalize(void *dummy) { mtx_lock(&dom_mtx); KASSERT(domain_init_status == 1, ("domainfinalize called too late!")); domain_init_status = 2; mtx_unlock(&dom_mtx); } SYSINIT(domainfin, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST, domainfinalize, NULL); struct domain * pffinddomain(int family) { struct domain *dp; SLIST_FOREACH(dp, &domains, dom_next) if (dp->dom_family == family) return (dp); return (NULL); } struct protosw * pffindproto(int family, int type, int proto) { struct domain *dp; struct protosw *pr; dp = pffinddomain(family); if (dp == NULL) return (NULL); for (int i = 0; i < dp->dom_nprotosw; i++) if ((pr = dp->dom_protosw[i]) != NULL && pr->pr_type == type && (pr->pr_protocol == 0 || proto == 0 || pr->pr_protocol == proto)) return (pr); return (NULL); } /* * The caller must make sure that the new protocol is fully set up and ready to * accept requests before it is registered. */ int protosw_register(struct domain *dp, struct protosw *npr) { struct protosw **prp; MPASS(dp); MPASS(npr && npr->pr_type > 0 && npr->pr_protocol > 0); prp = NULL; /* * Protect us against races when two protocol registrations for * the same protocol happen at the same time. */ mtx_lock(&dom_mtx); for (int i = 0; i < dp->dom_nprotosw; i++) { if (dp->dom_protosw[i] == NULL) { /* Remember the first free spacer. */ if (prp == NULL) prp = &dp->dom_protosw[i]; } else { /* * The new protocol must not yet exist. * XXXAO: Check only protocol? * XXXGL: Maybe assert that it doesn't exist? */ if ((dp->dom_protosw[i]->pr_type == npr->pr_type) && (dp->dom_protosw[i]->pr_protocol == npr->pr_protocol)) { mtx_unlock(&dom_mtx); return (EEXIST); } } } /* If no free spacer is found we can't add the new protocol. */ if (prp == NULL) { mtx_unlock(&dom_mtx); return (ENOMEM); } pr_init(dp, npr); *prp = npr; mtx_unlock(&dom_mtx); return (0); } /* * The caller must make sure the protocol and its functions correctly shut down * all sockets and release all locks and memory references. */ int protosw_unregister(struct protosw *pr) { struct domain *dp; struct protosw **prp; dp = pr->pr_domain; prp = NULL; mtx_lock(&dom_mtx); /* The protocol must exist and only once. */ for (int i = 0; i < dp->dom_nprotosw; i++) { if (dp->dom_protosw[i] == pr) { KASSERT(prp == NULL, ("%s: domain %p protocol %p registered twice\n", __func__, dp, pr)); prp = &dp->dom_protosw[i]; } } /* Protocol does not exist. XXXGL: assert that it does? */ if (prp == NULL) { mtx_unlock(&dom_mtx); return (EPROTONOSUPPORT); } /* De-orbit the protocol and make the slot available again. 
*/ *prp = NULL; mtx_unlock(&dom_mtx); return (0); } diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c index d16c0049dc43..efa349d140ff 100644 --- a/sys/kern/uipc_socket.c +++ b/sys/kern/uipc_socket.c @@ -1,4404 +1,4438 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1988, 1990, 1993 * The Regents of the University of California. * Copyright (c) 2004 The FreeBSD Foundation * Copyright (c) 2004-2008 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Comments on the socket life cycle: * * soalloc() sets of socket layer state for a socket, called only by * socreate() and sonewconn(). Socket layer private. * * sodealloc() tears down socket layer state for a socket, called only by * sofree() and sonewconn(). Socket layer private. * * pru_attach() associates protocol layer state with an allocated socket; * called only once, may fail, aborting socket allocation. This is called * from socreate() and sonewconn(). Socket layer private. * * pru_detach() disassociates protocol layer state from an attached socket, * and will be called exactly once for sockets in which pru_attach() has * been successfully called. If pru_attach() returned an error, * pru_detach() will not be called. Socket layer private. * * pru_abort() and pru_close() notify the protocol layer that the last * consumer of a socket is starting to tear down the socket, and that the * protocol should terminate the connection. Historically, pru_abort() also * detached protocol state from the socket state, but this is no longer the * case. * * socreate() creates a socket and attaches protocol state. This is a public * interface that may be used by socket layer consumers to create new * sockets. * * sonewconn() creates a socket and attaches protocol state. This is a * public interface that may be used by protocols to create new sockets when * a new connection is received and will be available for accept() on a * listen socket. * * soclose() destroys a socket after possibly waiting for it to disconnect. 
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 *
 * NOTE: With regard to VNETs the general rule is that callers do not set
 * curvnet.  Exceptions to this rule include soabort(), sodisconnect(),
 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
 * and sorflush(), which are usually called from a pre-set VNET context.
 * sopoll() currently does not need a VNET context to be set.
*/ #include #include "opt_inet.h" #include "opt_inet6.h" #include "opt_kern_tls.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include /* for struct knote */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef COMPAT_FREEBSD32 #include #include #include #endif static int soreceive_rcvoob(struct socket *so, struct uio *uio, int flags); static void so_rdknl_lock(void *); static void so_rdknl_unlock(void *); static void so_rdknl_assert_lock(void *, int); static void so_wrknl_lock(void *); static void so_wrknl_unlock(void *); static void so_wrknl_assert_lock(void *, int); static void filt_sordetach(struct knote *kn); static int filt_soread(struct knote *kn, long hint); static void filt_sowdetach(struct knote *kn); static int filt_sowrite(struct knote *kn, long hint); static int filt_soempty(struct knote *kn, long hint); static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id); fo_kqfilter_t soo_kqfilter; static struct filterops soread_filtops = { .f_isfd = 1, .f_detach = filt_sordetach, .f_event = filt_soread, }; static struct filterops sowrite_filtops = { .f_isfd = 1, .f_detach = filt_sowdetach, .f_event = filt_sowrite, }; static struct filterops soempty_filtops = { .f_isfd = 1, .f_detach = filt_sowdetach, .f_event = filt_soempty, }; so_gen_t so_gencnt; /* generation count for sockets */ MALLOC_DEFINE(M_SONAME, "soname", "socket name"); MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); #define VNET_SO_ASSERT(so) \ VNET_ASSERT(curvnet != NULL, \ ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so))); VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]); #define V_socket_hhh VNET(socket_hhh) /* * Limit on the number of connections in the listen queue waiting * for accept(2). * NB: The original sysctl somaxconn is still available but hidden * to prevent confusion about the actual purpose of this number. */ static u_int somaxconn = SOMAXCONN; static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS) { int error; int val; val = somaxconn; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr ) return (error); /* * The purpose of the UINT_MAX / 3 limit, is so that the formula * 3 * so_qlimit / 2 * below, will not overflow. */ if (val < 1 || val > UINT_MAX / 3) return (EINVAL); somaxconn = val; return (0); } SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(int), sysctl_somaxconn, "I", "Maximum listen socket pending connection accept queue size"); SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE, 0, sizeof(int), sysctl_somaxconn, "I", "Maximum listen socket pending connection accept queue size (compat)"); static int numopensockets; SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD, &numopensockets, 0, "Number of open sockets"); /* * so_global_mtx protects so_gencnt, numopensockets, and the per-socket * so_gencnt field. */ static struct mtx so_global_mtx; MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF); /* * General IPC sysctl name space, used by sockets and a variety of other IPC * types. 
*/ SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "IPC"); /* * Initialize the socket subsystem and set up the socket * memory allocator. */ static uma_zone_t socket_zone; int maxsockets; static void socket_zone_change(void *tag) { maxsockets = uma_zone_set_max(socket_zone, maxsockets); } static void socket_hhook_register(int subtype) { if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype, &V_socket_hhh[subtype], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0) printf("%s: WARNING: unable to register hook\n", __func__); } static void socket_hhook_deregister(int subtype) { if (hhook_head_deregister(V_socket_hhh[subtype]) != 0) printf("%s: WARNING: unable to deregister hook\n", __func__); } static void socket_init(void *tag) { socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); maxsockets = uma_zone_set_max(socket_zone, maxsockets); uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached"); EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL, EVENTHANDLER_PRI_FIRST); } SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL); static void socket_vnet_init(const void *unused __unused) { int i; /* We expect a contiguous range */ for (i = 0; i <= HHOOK_SOCKET_LAST; i++) socket_hhook_register(i); } VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_vnet_init, NULL); static void socket_vnet_uninit(const void *unused __unused) { int i; for (i = 0; i <= HHOOK_SOCKET_LAST; i++) socket_hhook_deregister(i); } VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_vnet_uninit, NULL); /* * Initialise maxsockets. This SYSINIT must be run after * tunable_mbinit(). */ static void init_maxsockets(void *ignored) { TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets); maxsockets = imax(maxsockets, maxfiles); } SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL); /* * Sysctl to get and set the maximum global sockets limit. Notify protocols * of the change so that they can update their dependent limits as required. */ static int sysctl_maxsockets(SYSCTL_HANDLER_ARGS) { int error, newmaxsockets; newmaxsockets = maxsockets; error = sysctl_handle_int(oidp, &newmaxsockets, 0, req); if (error == 0 && req->newptr && newmaxsockets != maxsockets) { if (newmaxsockets > maxsockets && newmaxsockets <= maxfiles) { maxsockets = newmaxsockets; EVENTHANDLER_INVOKE(maxsockets_change); } else error = EINVAL; } return (error); } SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, &maxsockets, 0, sysctl_maxsockets, "IU", "Maximum number of sockets available"); /* * Socket operation routines. These routines are called by the routines in * sys_socket.c or from a system process, and implement the semantics of * socket operations by switching out to the protocol specific routines. */ /* * Get a socket structure from our zone, and initialize it. Note that it * would probably be better to allocate socket and PCB at the same time, but * I'm not convinced that all the protocols can be easily modified to do * this. * * soalloc() returns a socket with a ref count of 0. 
*/ static struct socket * soalloc(struct vnet *vnet) { struct socket *so; so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO); if (so == NULL) return (NULL); #ifdef MAC if (mac_socket_init(so, M_NOWAIT) != 0) { uma_zfree(socket_zone, so); return (NULL); } #endif if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) { uma_zfree(socket_zone, so); return (NULL); } /* * The socket locking protocol allows to lock 2 sockets at a time, * however, the first one must be a listening socket. WITNESS lacks * a feature to change class of an existing lock, so we use DUPOK. */ mtx_init(&so->so_lock, "socket", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&so->so_snd_mtx, "so_snd", NULL, MTX_DEF); mtx_init(&so->so_rcv_mtx, "so_rcv", NULL, MTX_DEF); so->so_rcv.sb_sel = &so->so_rdsel; so->so_snd.sb_sel = &so->so_wrsel; sx_init(&so->so_snd_sx, "so_snd_sx"); sx_init(&so->so_rcv_sx, "so_rcv_sx"); TAILQ_INIT(&so->so_snd.sb_aiojobq); TAILQ_INIT(&so->so_rcv.sb_aiojobq); TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so); TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so); #ifdef VIMAGE VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p", __func__, __LINE__, so)); so->so_vnet = vnet; #endif /* We shouldn't need the so_global_mtx */ if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) { /* Do we need more comprehensive error returns? */ uma_zfree(socket_zone, so); return (NULL); } mtx_lock(&so_global_mtx); so->so_gencnt = ++so_gencnt; ++numopensockets; #ifdef VIMAGE vnet->vnet_sockcnt++; #endif mtx_unlock(&so_global_mtx); return (so); } /* * Free the storage associated with a socket at the socket layer, tear down * locks, labels, etc. All protocol state is assumed already to have been * torn down (and possibly never set up) by the caller. */ void sodealloc(struct socket *so) { KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count)); KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL")); mtx_lock(&so_global_mtx); so->so_gencnt = ++so_gencnt; --numopensockets; /* Could be below, but faster here. */ #ifdef VIMAGE VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p", __func__, __LINE__, so)); so->so_vnet->vnet_sockcnt--; #endif mtx_unlock(&so_global_mtx); #ifdef MAC mac_socket_destroy(so); #endif hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE); khelp_destroy_osd(&so->osd); if (SOLISTENING(so)) { if (so->sol_accept_filter != NULL) accept_filt_setopt(so, NULL); } else { if (so->so_rcv.sb_hiwat) (void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY); if (so->so_snd.sb_hiwat) (void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat, 0, RLIM_INFINITY); sx_destroy(&so->so_snd_sx); sx_destroy(&so->so_rcv_sx); mtx_destroy(&so->so_snd_mtx); mtx_destroy(&so->so_rcv_mtx); } crfree(so->so_cred); mtx_destroy(&so->so_lock); uma_zfree(socket_zone, so); } /* * socreate returns a socket with a ref count of 1 and a file descriptor * reference. The socket should be closed with soclose(). */ int socreate(int dom, struct socket **aso, int type, int proto, struct ucred *cred, struct thread *td) { struct protosw *prp; struct socket *so; int error; /* * XXX: divert(4) historically abused PF_INET. Keep this compatibility * shim until all applications have been updated. */ if (__predict_false(dom == PF_INET && type == SOCK_RAW && proto == IPPROTO_DIVERT)) { dom = PF_DIVERT; printf("%s uses obsolete way to create divert(4) socket\n", td->td_proc->p_comm); } prp = pffindproto(dom, type, proto); if (prp == NULL) { /* No support for domain. 
*/ if (pffinddomain(dom) == NULL) return (EAFNOSUPPORT); /* No support for socket type. */ if (proto == 0 && type != 0) return (EPROTOTYPE); return (EPROTONOSUPPORT); } MPASS(prp->pr_attach); if (IN_CAPABILITY_MODE(td) && (prp->pr_flags & PR_CAPATTACH) == 0) return (ECAPMODE); if (prison_check_af(cred, prp->pr_domain->dom_family) != 0) return (EPROTONOSUPPORT); so = soalloc(CRED_TO_VNET(cred)); if (so == NULL) return (ENOBUFS); so->so_type = type; so->so_cred = crhold(cred); if ((prp->pr_domain->dom_family == PF_INET) || (prp->pr_domain->dom_family == PF_INET6) || (prp->pr_domain->dom_family == PF_ROUTE)) so->so_fibnum = td->td_proc->p_fibnum; else so->so_fibnum = 0; so->so_proto = prp; #ifdef MAC mac_socket_create(cred, so); #endif knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock, so_rdknl_assert_lock); knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock, so_wrknl_assert_lock); if ((prp->pr_flags & PR_SOCKBUF) == 0) { so->so_snd.sb_mtx = &so->so_snd_mtx; so->so_rcv.sb_mtx = &so->so_rcv_mtx; } /* * Auto-sizing of socket buffers is managed by the protocols and * the appropriate flags must be set in the pru_attach function. */ CURVNET_SET(so->so_vnet); error = prp->pr_attach(so, proto, td); CURVNET_RESTORE(); if (error) { sodealloc(so); return (error); } soref(so); *aso = so; return (0); } #ifdef REGRESSION static int regression_sonewconn_earlytest = 1; SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW, ®ression_sonewconn_earlytest, 0, "Perform early sonewconn limit test"); #endif static int sooverprio = LOG_DEBUG; SYSCTL_INT(_kern_ipc, OID_AUTO, sooverprio, CTLFLAG_RW, &sooverprio, 0, "Log priority for listen socket overflows: 0..7 or -1 to disable"); static struct timeval overinterval = { 60, 0 }; SYSCTL_TIMEVAL_SEC(_kern_ipc, OID_AUTO, sooverinterval, CTLFLAG_RW, &overinterval, "Delay in seconds between warnings for listen socket overflows"); /* * When an attempt at a new connection is noted on a socket which supports * accept(2), the protocol has two options: * 1) Call legacy sonewconn() function, which would call protocol attach * method, same as used for socket(2). * 2) Call solisten_clone(), do attach that is specific to a cloned connection, * and then call solisten_enqueue(). * * Note: the ref count on the socket is 0 on return. */ struct socket * solisten_clone(struct socket *head) { struct sbuf descrsb; struct socket *so; int len, overcount; u_int qlen; const char localprefix[] = "local:"; char descrbuf[SUNPATHLEN + sizeof(localprefix)]; #if defined(INET6) char addrbuf[INET6_ADDRSTRLEN]; #elif defined(INET) char addrbuf[INET_ADDRSTRLEN]; #endif bool dolog, over; SOLISTEN_LOCK(head); over = (head->sol_qlen > 3 * head->sol_qlimit / 2); #ifdef REGRESSION if (regression_sonewconn_earlytest && over) { #else if (over) { #endif head->sol_overcount++; dolog = (sooverprio >= 0) && !!ratecheck(&head->sol_lastover, &overinterval); /* * If we're going to log, copy the overflow count and queue * length from the listen socket before dropping the lock. * Also, reset the overflow count. */ if (dolog) { overcount = head->sol_overcount; head->sol_overcount = 0; qlen = head->sol_qlen; } SOLISTEN_UNLOCK(head); if (dolog) { /* * Try to print something descriptive about the * socket for the error message. 
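/*
 * Illustrative sketch (not part of the original patch): a minimal in-kernel
 * consumer of socreate()/soclose().  The function name is hypothetical; the
 * socket comes back with one reference, which soclose() releases.
 */
#if 0
static int
example_udp_socket(struct thread *td)
{
	struct socket *so;
	int error;

	error = socreate(PF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);
	/* ... sobind()/sosend()/soreceive() would go here ... */
	return (soclose(so));
}
#endif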
*/ sbuf_new(&descrsb, descrbuf, sizeof(descrbuf), SBUF_FIXEDLEN); switch (head->so_proto->pr_domain->dom_family) { #if defined(INET) || defined(INET6) #ifdef INET case AF_INET: #endif #ifdef INET6 case AF_INET6: if (head->so_proto->pr_domain->dom_family == AF_INET6 || (sotoinpcb(head)->inp_inc.inc_flags & INC_ISIPV6)) { ip6_sprintf(addrbuf, &sotoinpcb(head)->inp_inc.inc6_laddr); sbuf_printf(&descrsb, "[%s]", addrbuf); } else #endif { #ifdef INET inet_ntoa_r( sotoinpcb(head)->inp_inc.inc_laddr, addrbuf); sbuf_cat(&descrsb, addrbuf); #endif } sbuf_printf(&descrsb, ":%hu (proto %u)", ntohs(sotoinpcb(head)->inp_inc.inc_lport), head->so_proto->pr_protocol); break; #endif /* INET || INET6 */ case AF_UNIX: sbuf_cat(&descrsb, localprefix); if (sotounpcb(head)->unp_addr != NULL) len = sotounpcb(head)->unp_addr->sun_len - offsetof(struct sockaddr_un, sun_path); else len = 0; if (len > 0) sbuf_bcat(&descrsb, sotounpcb(head)->unp_addr->sun_path, len); else sbuf_cat(&descrsb, "(unknown)"); break; } /* * If we can't print something more specific, at least * print the domain name. */ if (sbuf_finish(&descrsb) != 0 || sbuf_len(&descrsb) <= 0) { sbuf_clear(&descrsb); sbuf_cat(&descrsb, head->so_proto->pr_domain->dom_name ?: "unknown"); sbuf_finish(&descrsb); } KASSERT(sbuf_len(&descrsb) > 0, ("%s: sbuf creation failed", __func__)); /* * Preserve the historic listen queue overflow log * message, that starts with "sonewconn:". It has * been known to sysadmins for years and also test * sys/kern/sonewconn_overflow checks for it. */ if (head->so_cred == 0) { log(LOG_PRI(sooverprio), "sonewconn: pcb %p (%s): " "Listen queue overflow: %i already in " "queue awaiting acceptance (%d " "occurrences)\n", head->so_pcb, sbuf_data(&descrsb), qlen, overcount); } else { log(LOG_PRI(sooverprio), "sonewconn: pcb %p (%s): " "Listen queue overflow: " "%i already in queue awaiting acceptance " "(%d occurrences), euid %d, rgid %d, jail %s\n", head->so_pcb, sbuf_data(&descrsb), qlen, overcount, head->so_cred->cr_uid, head->so_cred->cr_rgid, head->so_cred->cr_prison ? head->so_cred->cr_prison->pr_name : "not_jailed"); } sbuf_delete(&descrsb); overcount = 0; } return (NULL); } SOLISTEN_UNLOCK(head); VNET_ASSERT(head->so_vnet != NULL, ("%s: so %p vnet is NULL", __func__, head)); so = soalloc(head->so_vnet); if (so == NULL) { log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: " "limit reached or out of memory\n", __func__, head->so_pcb); return (NULL); } so->so_listen = head; so->so_type = head->so_type; /* * POSIX is ambiguous on what options an accept(2)ed socket should * inherit from the listener. Words "create a new socket" may be * interpreted as not inheriting anything. Best programming practice * for application developers is to not rely on such inheritance. * FreeBSD had historically inherited all so_options excluding * SO_ACCEPTCONN, which virtually means all SOL_SOCKET level options, * including those completely irrelevant to a new born socket. For * compatibility with older versions we will inherit a list of * meaningful options. 
*/ so->so_options = head->so_options & (SO_KEEPALIVE | SO_DONTROUTE | SO_LINGER | SO_OOBINLINE | SO_NOSIGPIPE); so->so_linger = head->so_linger; so->so_state = head->so_state; so->so_fibnum = head->so_fibnum; so->so_proto = head->so_proto; so->so_cred = crhold(head->so_cred); #ifdef MAC mac_socket_newconn(head, so); #endif knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock, so_rdknl_assert_lock); knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock, so_wrknl_assert_lock); VNET_SO_ASSERT(head); if (soreserve(so, head->sol_sbsnd_hiwat, head->sol_sbrcv_hiwat)) { sodealloc(so); log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n", __func__, head->so_pcb); return (NULL); } so->so_rcv.sb_lowat = head->sol_sbrcv_lowat; so->so_snd.sb_lowat = head->sol_sbsnd_lowat; so->so_rcv.sb_timeo = head->sol_sbrcv_timeo; so->so_snd.sb_timeo = head->sol_sbsnd_timeo; so->so_rcv.sb_flags = head->sol_sbrcv_flags & SB_AUTOSIZE; so->so_snd.sb_flags = head->sol_sbsnd_flags & SB_AUTOSIZE; if ((so->so_proto->pr_flags & PR_SOCKBUF) == 0) { so->so_snd.sb_mtx = &so->so_snd_mtx; so->so_rcv.sb_mtx = &so->so_rcv_mtx; } return (so); } /* Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED. */ struct socket * sonewconn(struct socket *head, int connstatus) { struct socket *so; if ((so = solisten_clone(head)) == NULL) return (NULL); if (so->so_proto->pr_attach(so, 0, NULL) != 0) { sodealloc(so); log(LOG_DEBUG, "%s: pcb %p: pr_attach() failed\n", __func__, head->so_pcb); return (NULL); } (void)solisten_enqueue(so, connstatus); return (so); } /* * Enqueue socket cloned by solisten_clone() to the listen queue of the * listener it has been cloned from. * * Return 'true' if socket landed on complete queue, otherwise 'false'. */ bool solisten_enqueue(struct socket *so, int connstatus) { struct socket *head = so->so_listen; MPASS(refcount_load(&so->so_count) == 0); refcount_init(&so->so_count, 1); SOLISTEN_LOCK(head); if (head->sol_accept_filter != NULL) connstatus = 0; so->so_state |= connstatus; soref(head); /* A socket on (in)complete queue refs head. */ if (connstatus) { TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list); so->so_qstate = SQ_COMP; head->sol_qlen++; solisten_wakeup(head); /* unlocks */ return (true); } else { /* * Keep removing sockets from the head until there's room for * us to insert on the tail. In pre-locking revisions, this * was a simple if(), but as we could be racing with other * threads and soabort() requires dropping locks, we must * loop waiting for the condition to be true. */ while (head->sol_incqlen > head->sol_qlimit) { struct socket *sp; sp = TAILQ_FIRST(&head->sol_incomp); TAILQ_REMOVE(&head->sol_incomp, sp, so_list); head->sol_incqlen--; SOCK_LOCK(sp); sp->so_qstate = SQ_NONE; sp->so_listen = NULL; SOCK_UNLOCK(sp); sorele_locked(head); /* does SOLISTEN_UNLOCK, head stays */ soabort(sp); SOLISTEN_LOCK(head); } TAILQ_INSERT_TAIL(&head->sol_incomp, so, so_list); so->so_qstate = SQ_INCOMP; head->sol_incqlen++; SOLISTEN_UNLOCK(head); return (false); } } #if defined(SCTP) || defined(SCTP_SUPPORT) /* * Socket part of sctp_peeloff(). Detach a new socket from an * association. The new socket is returned with a reference. * * XXXGL: reduce copy-paste with solisten_clone(). 
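/*
 * Illustrative sketch (not part of the original patch): a protocol using the
 * two-step solisten_clone()/solisten_enqueue() interface instead of legacy
 * sonewconn().  example_pcb_attach() is hypothetical and stands in for the
 * connection-specific attach step.
 */
#if 0
static struct socket *
example_input_new_connection(struct socket *head)
{
	struct socket *so;

	if ((so = solisten_clone(head)) == NULL)
		return (NULL);
	if (example_pcb_attach(so) != 0) {
		sodealloc(so);
		return (NULL);
	}
	/*
	 * connstatus 0 lands on the incomplete queue; SS_ISCONNECTED would
	 * place the socket directly on the complete queue.
	 */
	(void)solisten_enqueue(so, 0);
	return (so);
}
#endif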
*/ struct socket * sopeeloff(struct socket *head) { struct socket *so; VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p", __func__, __LINE__, head)); so = soalloc(head->so_vnet); if (so == NULL) { log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: " "limit reached or out of memory\n", __func__, head->so_pcb); return (NULL); } so->so_type = head->so_type; so->so_options = head->so_options; so->so_linger = head->so_linger; so->so_state = (head->so_state & SS_NBIO) | SS_ISCONNECTED; so->so_fibnum = head->so_fibnum; so->so_proto = head->so_proto; so->so_cred = crhold(head->so_cred); #ifdef MAC mac_socket_newconn(head, so); #endif knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock, so_rdknl_assert_lock); knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock, so_wrknl_assert_lock); VNET_SO_ASSERT(head); if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) { sodealloc(so); log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n", __func__, head->so_pcb); return (NULL); } if ((*so->so_proto->pr_attach)(so, 0, NULL)) { sodealloc(so); log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n", __func__, head->so_pcb); return (NULL); } so->so_rcv.sb_lowat = head->so_rcv.sb_lowat; so->so_snd.sb_lowat = head->so_snd.sb_lowat; so->so_rcv.sb_timeo = head->so_rcv.sb_timeo; so->so_snd.sb_timeo = head->so_snd.sb_timeo; so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE; so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE; soref(so); return (so); } #endif /* SCTP */ int sobind(struct socket *so, struct sockaddr *nam, struct thread *td) { int error; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_bind(so, nam, td); CURVNET_RESTORE(); return (error); } int sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { int error; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_bindat(fd, so, nam, td); CURVNET_RESTORE(); return (error); } /* * solisten() transitions a socket from a non-listening state to a listening * state, but can also be used to update the listen queue depth on an * existing listen socket. The protocol will call back into the sockets * layer using solisten_proto_check() and solisten_proto() to check and set * socket-layer listen state. Call backs are used so that the protocol can * acquire both protocol and socket layer locks in whatever order is required * by the protocol. * * Protocol implementors are advised to hold the socket lock across the * socket-layer test and set to avoid races at the socket layer. */ int solisten(struct socket *so, int backlog, struct thread *td) { int error; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_listen(so, backlog, td); CURVNET_RESTORE(); return (error); } /* * Prepare for a call to solisten_proto(). Acquire all socket buffer locks in * order to interlock with socket I/O. */ int solisten_proto_check(struct socket *so) { SOCK_LOCK_ASSERT(so); if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) != 0) return (EINVAL); /* * Sleeping is not permitted here, so simply fail if userspace is * attempting to transmit or receive on the socket. This kind of * transient failure is not ideal, but it should occur only if userspace * is misusing the socket interfaces. */ if (!sx_try_xlock(&so->so_snd_sx)) return (EAGAIN); if (!sx_try_xlock(&so->so_rcv_sx)) { sx_xunlock(&so->so_snd_sx); return (EAGAIN); } mtx_lock(&so->so_snd_mtx); mtx_lock(&so->so_rcv_mtx); /* Interlock with soo_aio_queue() and KTLS. 
*/ if (!SOLISTENING(so)) { bool ktls; #ifdef KERN_TLS ktls = so->so_snd.sb_tls_info != NULL || so->so_rcv.sb_tls_info != NULL; #else ktls = false; #endif if (ktls || (so->so_snd.sb_flags & (SB_AIO | SB_AIO_RUNNING)) != 0 || (so->so_rcv.sb_flags & (SB_AIO | SB_AIO_RUNNING)) != 0) { solisten_proto_abort(so); return (EINVAL); } } return (0); } /* * Undo the setup done by solisten_proto_check(). */ void solisten_proto_abort(struct socket *so) { mtx_unlock(&so->so_snd_mtx); mtx_unlock(&so->so_rcv_mtx); sx_xunlock(&so->so_snd_sx); sx_xunlock(&so->so_rcv_sx); } void solisten_proto(struct socket *so, int backlog) { int sbrcv_lowat, sbsnd_lowat; u_int sbrcv_hiwat, sbsnd_hiwat; short sbrcv_flags, sbsnd_flags; sbintime_t sbrcv_timeo, sbsnd_timeo; SOCK_LOCK_ASSERT(so); KASSERT((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0, ("%s: bad socket state %p", __func__, so)); if (SOLISTENING(so)) goto listening; /* * Change this socket to listening state. */ sbrcv_lowat = so->so_rcv.sb_lowat; sbsnd_lowat = so->so_snd.sb_lowat; sbrcv_hiwat = so->so_rcv.sb_hiwat; sbsnd_hiwat = so->so_snd.sb_hiwat; sbrcv_flags = so->so_rcv.sb_flags; sbsnd_flags = so->so_snd.sb_flags; sbrcv_timeo = so->so_rcv.sb_timeo; sbsnd_timeo = so->so_snd.sb_timeo; sbdestroy(so, SO_SND); sbdestroy(so, SO_RCV); #ifdef INVARIANTS bzero(&so->so_rcv, sizeof(struct socket) - offsetof(struct socket, so_rcv)); #endif so->sol_sbrcv_lowat = sbrcv_lowat; so->sol_sbsnd_lowat = sbsnd_lowat; so->sol_sbrcv_hiwat = sbrcv_hiwat; so->sol_sbsnd_hiwat = sbsnd_hiwat; so->sol_sbrcv_flags = sbrcv_flags; so->sol_sbsnd_flags = sbsnd_flags; so->sol_sbrcv_timeo = sbrcv_timeo; so->sol_sbsnd_timeo = sbsnd_timeo; so->sol_qlen = so->sol_incqlen = 0; TAILQ_INIT(&so->sol_incomp); TAILQ_INIT(&so->sol_comp); so->sol_accept_filter = NULL; so->sol_accept_filter_arg = NULL; so->sol_accept_filter_str = NULL; so->sol_upcall = NULL; so->sol_upcallarg = NULL; so->so_options |= SO_ACCEPTCONN; listening: if (backlog < 0 || backlog > somaxconn) backlog = somaxconn; so->sol_qlimit = backlog; mtx_unlock(&so->so_snd_mtx); mtx_unlock(&so->so_rcv_mtx); sx_xunlock(&so->so_snd_sx); sx_xunlock(&so->so_rcv_sx); } /* * Wakeup listeners/subsystems once we have a complete connection. * Enters with lock, returns unlocked. */ void solisten_wakeup(struct socket *sol) { if (sol->sol_upcall != NULL) (void )sol->sol_upcall(sol, sol->sol_upcallarg, M_NOWAIT); else { selwakeuppri(&sol->so_rdsel, PSOCK); KNOTE_LOCKED(&sol->so_rdsel.si_note, 0); } SOLISTEN_UNLOCK(sol); wakeup_one(&sol->sol_comp); if ((sol->so_state & SS_ASYNC) && sol->so_sigio != NULL) pgsigio(&sol->so_sigio, SIGIO, 0); } /* * Return single connection off a listening socket queue. Main consumer of * the function is kern_accept4(). Some modules, that do their own accept * management also use the function. The socket reference held by the * listen queue is handed to the caller. * * Listening socket must be locked on entry and is returned unlocked on * return. * The flags argument is set of accept4(2) flags and ACCEPT4_INHERIT. 
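/*
 * Illustrative sketch (not part of the original patch): the protocol-side
 * pr_listen pattern built on solisten_proto_check()/solisten_proto().  The
 * function name is hypothetical; a real protocol inserts its own checks
 * between the two calls and uses solisten_proto_abort() on failure.
 */
#if 0
static int
example_pr_listen(struct socket *so, int backlog, struct thread *td)
{
	int error;

	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error == 0)
		solisten_proto(so, backlog);
	SOCK_UNLOCK(so);
	return (error);
}
#endif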
*/ int solisten_dequeue(struct socket *head, struct socket **ret, int flags) { struct socket *so; int error; SOLISTEN_LOCK_ASSERT(head); while (!(head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp) && head->so_error == 0) { error = msleep(&head->sol_comp, SOCK_MTX(head), PSOCK | PCATCH, "accept", 0); if (error != 0) { SOLISTEN_UNLOCK(head); return (error); } } if (head->so_error) { error = head->so_error; head->so_error = 0; } else if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp)) error = EWOULDBLOCK; else error = 0; if (error) { SOLISTEN_UNLOCK(head); return (error); } so = TAILQ_FIRST(&head->sol_comp); SOCK_LOCK(so); KASSERT(so->so_qstate == SQ_COMP, ("%s: so %p not SQ_COMP", __func__, so)); head->sol_qlen--; so->so_qstate = SQ_NONE; so->so_listen = NULL; TAILQ_REMOVE(&head->sol_comp, so, so_list); if (flags & ACCEPT4_INHERIT) so->so_state |= (head->so_state & SS_NBIO); else so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0; SOCK_UNLOCK(so); sorele_locked(head); *ret = so; return (0); } /* * Free socket upon release of the very last reference. */ static void sofree(struct socket *so) { struct protosw *pr = so->so_proto; SOCK_LOCK_ASSERT(so); KASSERT(refcount_load(&so->so_count) == 0, ("%s: so %p has references", __func__, so)); KASSERT(SOLISTENING(so) || so->so_qstate == SQ_NONE, ("%s: so %p is on listen queue", __func__, so)); SOCK_UNLOCK(so); if (so->so_dtor != NULL) so->so_dtor(so); VNET_SO_ASSERT(so); if ((pr->pr_flags & PR_RIGHTS) && !SOLISTENING(so)) { MPASS(pr->pr_domain->dom_dispose != NULL); (*pr->pr_domain->dom_dispose)(so); } if (pr->pr_detach != NULL) pr->pr_detach(so); /* * From this point on, we assume that no other references to this * socket exist anywhere else in the stack. Therefore, no locks need * to be acquired or held. */ if (!(pr->pr_flags & PR_SOCKBUF) && !SOLISTENING(so)) { sbdestroy(so, SO_SND); sbdestroy(so, SO_RCV); } seldrain(&so->so_rdsel); seldrain(&so->so_wrsel); knlist_destroy(&so->so_rdsel.si_note); knlist_destroy(&so->so_wrsel.si_note); sodealloc(so); } /* * Release a reference on a socket while holding the socket lock. * Unlocks the socket lock before returning. */ void sorele_locked(struct socket *so) { SOCK_LOCK_ASSERT(so); if (refcount_release(&so->so_count)) sofree(so); else SOCK_UNLOCK(so); } /* * Close a socket on last file table reference removal. Initiate disconnect * if connected. Free socket when disconnect complete. * * This function will sorele() the socket. Note that soclose() may be called * prior to the ref count reaching zero. The actual socket structure will * not be freed until the ref count reaches zero. 
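/*
 * Illustrative sketch (not part of the original patch): the accept(2)-style
 * consumer of solisten_dequeue() described above.  Variable setup is
 * abbreviated; 'head' is the listening socket.
 */
#if 0
	SOLISTEN_LOCK(head);
	error = solisten_dequeue(head, &so, flags);	/* returns with head unlocked */
	if (error != 0)
		return (error);
	/* 'so' now owns the reference previously held by the listen queue. */
#endif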
*/ int soclose(struct socket *so) { struct accept_queue lqueue; int error = 0; bool listening, last __diagused; CURVNET_SET(so->so_vnet); funsetown(&so->so_sigio); if (so->so_state & SS_ISCONNECTED) { if ((so->so_state & SS_ISDISCONNECTING) == 0) { error = sodisconnect(so); if (error) { if (error == ENOTCONN) error = 0; goto drop; } } if ((so->so_options & SO_LINGER) != 0 && so->so_linger != 0) { if ((so->so_state & SS_ISDISCONNECTING) && (so->so_state & SS_NBIO)) goto drop; while (so->so_state & SS_ISCONNECTED) { error = tsleep(&so->so_timeo, PSOCK | PCATCH, "soclos", so->so_linger * hz); if (error) break; } } } drop: if (so->so_proto->pr_close != NULL) so->so_proto->pr_close(so); SOCK_LOCK(so); if ((listening = SOLISTENING(so))) { struct socket *sp; TAILQ_INIT(&lqueue); TAILQ_SWAP(&lqueue, &so->sol_incomp, socket, so_list); TAILQ_CONCAT(&lqueue, &so->sol_comp, so_list); so->sol_qlen = so->sol_incqlen = 0; TAILQ_FOREACH(sp, &lqueue, so_list) { SOCK_LOCK(sp); sp->so_qstate = SQ_NONE; sp->so_listen = NULL; SOCK_UNLOCK(sp); last = refcount_release(&so->so_count); KASSERT(!last, ("%s: released last reference for %p", __func__, so)); } } sorele_locked(so); if (listening) { struct socket *sp, *tsp; TAILQ_FOREACH_SAFE(sp, &lqueue, so_list, tsp) soabort(sp); } CURVNET_RESTORE(); return (error); } /* * soabort() is used to abruptly tear down a connection, such as when a * resource limit is reached (listen queue depth exceeded), or if a listen * socket is closed while there are sockets waiting to be accepted. * * This interface is tricky, because it is called on an unreferenced socket, * and must be called only by a thread that has actually removed the socket * from the listen queue it was on. Likely this thread holds the last * reference on the socket and soabort() will proceed with sofree(). But * it might be not the last, as the sockets on the listen queues are seen * from the protocol side. * * This interface will call into the protocol code, so must not be called * with any socket locks held. Protocols do call it while holding their own * recursible protocol mutexes, but this is something that should be subject * to review in the future. * * Usually socket should have a single reference left, but this is not a * requirement. In the past, when we have had named references for file * descriptor and protocol, we asserted that none of them are being held. 
*/ void soabort(struct socket *so) { VNET_SO_ASSERT(so); if (so->so_proto->pr_abort != NULL) so->so_proto->pr_abort(so); SOCK_LOCK(so); sorele_locked(so); } int soaccept(struct socket *so, struct sockaddr *sa) { #ifdef INVARIANTS u_char len = sa->sa_len; #endif int error; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_accept(so, sa); KASSERT(sa->sa_len <= len, ("%s: protocol %p sockaddr overflow", __func__, so->so_proto)); CURVNET_RESTORE(); return (error); } +int +sopeeraddr(struct socket *so, struct sockaddr *sa) +{ +#ifdef INVARIANTS + u_char len = sa->sa_len; +#endif + int error; + + CURVNET_SET(so->so_vnet); + error = so->so_proto->pr_peeraddr(so, sa); + KASSERT(sa->sa_len <= len, + ("%s: protocol %p sockaddr overflow", __func__, so->so_proto)); + CURVNET_RESTORE(); + + return (error); +} + +int +sosockaddr(struct socket *so, struct sockaddr *sa) +{ +#ifdef INVARIANTS + u_char len = sa->sa_len; +#endif + int error; + + CURVNET_SET(so->so_vnet); + error = so->so_proto->pr_sockaddr(so, sa); + KASSERT(sa->sa_len <= len, + ("%s: protocol %p sockaddr overflow", __func__, so->so_proto)); + CURVNET_RESTORE(); + + return (error); +} + int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td) { return (soconnectat(AT_FDCWD, so, nam, td)); } int soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { int error; CURVNET_SET(so->so_vnet); /* * If protocol is connection-based, can only connect once. * Otherwise, if connected, try to disconnect first. This allows * user to disconnect by connecting to, e.g., a null address. * * Note, this check is racy and may need to be re-evaluated at the * protocol layer. */ if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && ((so->so_proto->pr_flags & PR_CONNREQUIRED) || (error = sodisconnect(so)))) { error = EISCONN; } else { /* * Prevent accumulated error from previous connection from * biting us. */ so->so_error = 0; if (fd == AT_FDCWD) { error = so->so_proto->pr_connect(so, nam, td); } else { error = so->so_proto->pr_connectat(fd, so, nam, td); } } CURVNET_RESTORE(); return (error); } int soconnect2(struct socket *so1, struct socket *so2) { int error; CURVNET_SET(so1->so_vnet); error = so1->so_proto->pr_connect2(so1, so2); CURVNET_RESTORE(); return (error); } int sodisconnect(struct socket *so) { int error; if ((so->so_state & SS_ISCONNECTED) == 0) return (ENOTCONN); if (so->so_state & SS_ISDISCONNECTING) return (EALREADY); VNET_SO_ASSERT(so); error = so->so_proto->pr_disconnect(so); return (error); } int sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { long space; ssize_t resid; int clen = 0, error, dontroute; KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM")); KASSERT(so->so_proto->pr_flags & PR_ATOMIC, ("sosend_dgram: !PR_ATOMIC")); if (uio != NULL) resid = uio->uio_resid; else resid = top->m_pkthdr.len; /* * In theory resid should be unsigned. However, space must be * signed, as it might be less than 0 if we over-committed, and we * must use a signed comparison of space and resid. On the other * hand, a negative resid causes us to loop sending 0-length * segments to the protocol. 
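/*
 * Illustrative sketch (not part of the original patch): calling the new
 * sopeeraddr()/sosockaddr() KPI with a caller-allocated buffer.  This assumes
 * the caller advertises the buffer size in sa_len, which is what the
 * INVARIANTS check above implies.
 */
#if 0
	struct sockaddr_storage ss = { .ss_len = sizeof(ss) };
	int error;

	error = sopeeraddr(so, (struct sockaddr *)&ss);
	if (error == 0) {
		/* 'ss' now holds the peer address, ss.ss_len bytes of it. */
	}
#endif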
*/ if (resid < 0) { error = EINVAL; goto out; } dontroute = (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0; if (td != NULL) td->td_ru.ru_msgsnd++; if (control != NULL) clen = control->m_len; SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { SOCKBUF_UNLOCK(&so->so_snd); error = EPIPE; goto out; } if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_snd); goto out; } if ((so->so_state & SS_ISCONNECTED) == 0) { /* * `sendto' and `sendmsg' is allowed on a connection-based * socket if it supports implied connect. Return ENOTCONN if * not connected and no address is supplied. */ if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { if ((so->so_state & SS_ISCONFIRMING) == 0 && !(resid == 0 && clen != 0)) { SOCKBUF_UNLOCK(&so->so_snd); error = ENOTCONN; goto out; } } else if (addr == NULL) { if (so->so_proto->pr_flags & PR_CONNREQUIRED) error = ENOTCONN; else error = EDESTADDRREQ; SOCKBUF_UNLOCK(&so->so_snd); goto out; } } /* * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a * problem and need fixing. */ space = sbspace(&so->so_snd); if (flags & MSG_OOB) space += 1024; space -= clen; SOCKBUF_UNLOCK(&so->so_snd); if (resid > space) { error = EMSGSIZE; goto out; } if (uio == NULL) { resid = 0; if (flags & MSG_EOR) top->m_flags |= M_EOR; } else { /* * Copy the data from userland into a mbuf chain. * If no data is to be copied in, a single empty mbuf * is returned. */ top = m_uiotombuf(uio, M_WAITOK, space, max_hdr, (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0))); if (top == NULL) { error = EFAULT; /* only possible error */ goto out; } space -= resid - uio->uio_resid; resid = uio->uio_resid; } KASSERT(resid == 0, ("sosend_dgram: resid != 0")); /* * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock * than with. */ if (dontroute) { SOCK_LOCK(so); so->so_options |= SO_DONTROUTE; SOCK_UNLOCK(so); } /* * XXX all the SBS_CANTSENDMORE checks previously done could be out * of date. We could have received a reset packet in an interrupt or * maybe we slept while doing page faults in uiomove() etc. We could * probably recheck again inside the locking protection here, but * there are probably other places that this also happens. We must * rethink this. */ VNET_SO_ASSERT(so); error = so->so_proto->pr_send(so, (flags & MSG_OOB) ? PRUS_OOB : /* * If the user set MSG_EOF, the protocol understands this flag and * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND. */ ((flags & MSG_EOF) && (so->so_proto->pr_flags & PR_IMPLOPCL) && (resid <= 0)) ? PRUS_EOF : /* If there is more to send set PRUS_MORETOCOME */ (flags & MSG_MORETOCOME) || (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0, top, addr, control, td); if (dontroute) { SOCK_LOCK(so); so->so_options &= ~SO_DONTROUTE; SOCK_UNLOCK(so); } clen = 0; control = NULL; top = NULL; out: if (top != NULL) m_freem(top); if (control != NULL) m_freem(control); return (error); } /* * Send on a socket. If send must go all at once and message is larger than * send buffering, then hard error. Lock against other senders. If must go * all at once and not enough room now, then inform user that this would * block and do nothing. Otherwise, if nonblocking, send as much as * possible. The data to be sent is described by "uio" if nonzero, otherwise * by the mbuf chain "top" (which must be null if uio is not). Data provided * in mbuf chain must be small enough to send all at once. 
* * Returns nonzero on error, timeout or signal; callers must check for short * counts if EINTR/ERESTART are returned. Data and control buffers are freed * on return. */ int sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { long space; ssize_t resid; int clen = 0, error, dontroute; int atomic = sosendallatonce(so) || top; int pr_send_flag; #ifdef KERN_TLS struct ktls_session *tls; int tls_enq_cnt, tls_send_flag; uint8_t tls_rtype; tls = NULL; tls_rtype = TLS_RLTYPE_APP; #endif if (uio != NULL) resid = uio->uio_resid; else if ((top->m_flags & M_PKTHDR) != 0) resid = top->m_pkthdr.len; else resid = m_length(top, NULL); /* * In theory resid should be unsigned. However, space must be * signed, as it might be less than 0 if we over-committed, and we * must use a signed comparison of space and resid. On the other * hand, a negative resid causes us to loop sending 0-length * segments to the protocol. * * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM * type sockets since that's an error. */ if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { error = EINVAL; goto out; } dontroute = (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && (so->so_proto->pr_flags & PR_ATOMIC); if (td != NULL) td->td_ru.ru_msgsnd++; if (control != NULL) clen = control->m_len; error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags)); if (error) goto out; #ifdef KERN_TLS tls_send_flag = 0; tls = ktls_hold(so->so_snd.sb_tls_info); if (tls != NULL) { if (tls->mode == TCP_TLS_MODE_SW) tls_send_flag = PRUS_NOTREADY; if (control != NULL) { struct cmsghdr *cm = mtod(control, struct cmsghdr *); if (clen >= sizeof(*cm) && cm->cmsg_type == TLS_SET_RECORD_TYPE) { tls_rtype = *((uint8_t *)CMSG_DATA(cm)); clen = 0; m_freem(control); control = NULL; atomic = 1; } } if (resid == 0 && !ktls_permit_empty_frames(tls)) { error = EINVAL; goto release; } } #endif restart: do { SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { SOCKBUF_UNLOCK(&so->so_snd); error = EPIPE; goto release; } if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_snd); goto release; } if ((so->so_state & SS_ISCONNECTED) == 0) { /* * `sendto' and `sendmsg' is allowed on a connection- * based socket if it supports implied connect. * Return ENOTCONN if not connected and no address is * supplied. 
*/ if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { if ((so->so_state & SS_ISCONFIRMING) == 0 && !(resid == 0 && clen != 0)) { SOCKBUF_UNLOCK(&so->so_snd); error = ENOTCONN; goto release; } } else if (addr == NULL) { SOCKBUF_UNLOCK(&so->so_snd); if (so->so_proto->pr_flags & PR_CONNREQUIRED) error = ENOTCONN; else error = EDESTADDRREQ; goto release; } } space = sbspace(&so->so_snd); if (flags & MSG_OOB) space += 1024; if ((atomic && resid > so->so_snd.sb_hiwat) || clen > so->so_snd.sb_hiwat) { SOCKBUF_UNLOCK(&so->so_snd); error = EMSGSIZE; goto release; } if (space < resid + clen && (atomic || space < so->so_snd.sb_lowat || space < clen)) { if ((so->so_state & SS_NBIO) || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) { SOCKBUF_UNLOCK(&so->so_snd); error = EWOULDBLOCK; goto release; } error = sbwait(so, SO_SND); SOCKBUF_UNLOCK(&so->so_snd); if (error) goto release; goto restart; } SOCKBUF_UNLOCK(&so->so_snd); space -= clen; do { if (uio == NULL) { resid = 0; if (flags & MSG_EOR) top->m_flags |= M_EOR; #ifdef KERN_TLS if (tls != NULL) { ktls_frame(top, tls, &tls_enq_cnt, tls_rtype); tls_rtype = TLS_RLTYPE_APP; } #endif } else { /* * Copy the data from userland into a mbuf * chain. If resid is 0, which can happen * only if we have control to send, then * a single empty mbuf is returned. This * is a workaround to prevent protocol send * methods to panic. */ #ifdef KERN_TLS if (tls != NULL) { top = m_uiotombuf(uio, M_WAITOK, space, tls->params.max_frame_len, M_EXTPG | ((flags & MSG_EOR) ? M_EOR : 0)); if (top != NULL) { ktls_frame(top, tls, &tls_enq_cnt, tls_rtype); } tls_rtype = TLS_RLTYPE_APP; } else #endif top = m_uiotombuf(uio, M_WAITOK, space, (atomic ? max_hdr : 0), (atomic ? M_PKTHDR : 0) | ((flags & MSG_EOR) ? M_EOR : 0)); if (top == NULL) { error = EFAULT; /* only possible error */ goto release; } space -= resid - uio->uio_resid; resid = uio->uio_resid; } if (dontroute) { SOCK_LOCK(so); so->so_options |= SO_DONTROUTE; SOCK_UNLOCK(so); } /* * XXX all the SBS_CANTSENDMORE checks previously * done could be out of date. We could have received * a reset packet in an interrupt or maybe we slept * while doing page faults in uiomove() etc. We * could probably recheck again inside the locking * protection here, but there are probably other * places that this also happens. We must rethink * this. */ VNET_SO_ASSERT(so); pr_send_flag = (flags & MSG_OOB) ? PRUS_OOB : /* * If the user set MSG_EOF, the protocol understands * this flag and nothing left to send then use * PRU_SEND_EOF instead of PRU_SEND. */ ((flags & MSG_EOF) && (so->so_proto->pr_flags & PR_IMPLOPCL) && (resid <= 0)) ? PRUS_EOF : /* If there is more to send set PRUS_MORETOCOME. */ (flags & MSG_MORETOCOME) || (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0; #ifdef KERN_TLS pr_send_flag |= tls_send_flag; #endif error = so->so_proto->pr_send(so, pr_send_flag, top, addr, control, td); if (dontroute) { SOCK_LOCK(so); so->so_options &= ~SO_DONTROUTE; SOCK_UNLOCK(so); } #ifdef KERN_TLS if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) { if (error != 0) { m_freem(top); top = NULL; } else { soref(so); ktls_enqueue(top, so, tls_enq_cnt); } } #endif clen = 0; control = NULL; top = NULL; if (error) goto release; } while (resid && space > 0); } while (resid); release: SOCK_IO_SEND_UNLOCK(so); out: #ifdef KERN_TLS if (tls != NULL) ktls_free(tls); #endif if (top != NULL) m_freem(top); if (control != NULL) m_freem(control); return (error); } /* * Send to a socket from a kernel thread. 
* * XXXGL: in almost all cases uio is NULL and the mbuf is supplied. * Exception is nfs/bootp_subr.c. It is arguable that the VNET context needs * to be set at all. This function should just boil down to a static inline * calling the protocol method. */ int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { int error; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_sosend(so, addr, uio, top, control, flags, td); CURVNET_RESTORE(); return (error); } /* * send(2), write(2) or aio_write(2) on a socket. */ int sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *control, int flags, struct proc *userproc) { struct thread *td; ssize_t len; int error; td = uio->uio_td; len = uio->uio_resid; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_sosend(so, addr, uio, NULL, control, flags, td); CURVNET_RESTORE(); if (error != 0) { /* * Clear transient errors for stream protocols if they made * some progress. Make exclusion for aio(4) that would * schedule a new write in case of EWOULDBLOCK and clear * error itself. See soaio_process_job(). */ if (uio->uio_resid != len && (so->so_proto->pr_flags & PR_ATOMIC) == 0 && userproc == NULL && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) error = 0; /* Generation of SIGPIPE can be controlled per socket. */ if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0 && (flags & MSG_NOSIGNAL) == 0) { if (userproc != NULL) { /* aio(4) job */ PROC_LOCK(userproc); kern_psignal(userproc, SIGPIPE); PROC_UNLOCK(userproc); } else { PROC_LOCK(td->td_proc); tdsignal(td, SIGPIPE); PROC_UNLOCK(td->td_proc); } } } return (error); } /* * The part of soreceive() that implements reading non-inline out-of-band * data from a socket. For more complete comments, see soreceive(), from * which this code originated. * * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is * unable to return an mbuf chain to the caller. */ static int soreceive_rcvoob(struct socket *so, struct uio *uio, int flags) { struct protosw *pr = so->so_proto; struct mbuf *m; int error; KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0")); VNET_SO_ASSERT(so); m = m_get(M_WAITOK, MT_DATA); error = pr->pr_rcvoob(so, m, flags & MSG_PEEK); if (error) goto bad; do { error = uiomove(mtod(m, void *), (int) min(uio->uio_resid, m->m_len), uio); m = m_free(m); } while (uio->uio_resid && error == 0 && m); bad: if (m != NULL) m_freem(m); return (error); } /* * Following replacement or removal of the first mbuf on the first mbuf chain * of a socket buffer, push necessary state changes back into the socket * buffer so that other consumers see the values consistently. 'nextrecord' * is the callers locally stored value of the original value of * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes. * NOTE: 'nextrecord' may be NULL. */ static __inline void sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord) { SOCKBUF_LOCK_ASSERT(sb); /* * First, update for the new value of nextrecord. If necessary, make * it the first record. */ if (sb->sb_mb != NULL) sb->sb_mb->m_nextpkt = nextrecord; else sb->sb_mb = nextrecord; /* * Now update any dependent socket buffer fields to reflect the new * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the * addition of a second clause that takes care of the case where * sb_mb has been updated, but remains the last record. 
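/*
 * Illustrative sketch (not part of the original patch): the common kernel
 * caller of sosend() passes a NULL uio and supplies the data as an mbuf
 * chain, as noted above.  Per the sosend_generic() contract, the data and
 * control mbufs are freed on return.
 */
#if 0
	error = sosend(so, NULL /* addr */, NULL /* uio */, m,
	    NULL /* control */, 0 /* flags */, td);
#endif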
*/ if (sb->sb_mb == NULL) { sb->sb_mbtail = NULL; sb->sb_lastrecord = NULL; } else if (sb->sb_mb->m_nextpkt == NULL) sb->sb_lastrecord = sb->sb_mb; } /* * Implement receive operations on a socket. We depend on the way that * records are added to the sockbuf by sbappend. In particular, each record * (mbufs linked through m_next) must begin with an address if the protocol * so specifies, followed by an optional mbuf or mbufs containing ancillary * data, and then zero or more mbufs of data. In order to allow parallelism * between network receive and copying to user space, as well as avoid * sleeping with a mutex held, we release the socket buffer mutex during the * user space copy. Although the sockbuf is locked, new data may still be * appended, and thus we must maintain consistency of the sockbuf during that * time. * * The caller may receive the data as a single mbuf chain by supplying an * mbuf **mp0 for use in returning the chain. The uio is then used only for * the count in uio_resid. */ int soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { struct mbuf *m, **mp; int flags, error, offset; ssize_t len; struct protosw *pr = so->so_proto; struct mbuf *nextrecord; int moff, type = 0; ssize_t orig_resid = uio->uio_resid; bool report_real_len = false; mp = mp0; if (psa != NULL) *psa = NULL; if (controlp != NULL) *controlp = NULL; if (flagsp != NULL) { report_real_len = *flagsp & MSG_TRUNC; *flagsp &= ~MSG_TRUNC; flags = *flagsp &~ MSG_EOR; } else flags = 0; if (flags & MSG_OOB) return (soreceive_rcvoob(so, uio, flags)); if (mp != NULL) *mp = NULL; if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING) && uio->uio_resid) { VNET_SO_ASSERT(so); pr->pr_rcvd(so, 0); } error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); if (error) return (error); restart: SOCKBUF_LOCK(&so->so_rcv); m = so->so_rcv.sb_mb; /* * If we have less data than requested, block awaiting more (subject * to any timeout) if: * 1. the current count is less than the low water mark, or * 2. 
MSG_DONTWAIT is not set */ if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && sbavail(&so->so_rcv) < uio->uio_resid) && sbavail(&so->so_rcv) < so->so_rcv.sb_lowat && m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { KASSERT(m != NULL || !sbavail(&so->so_rcv), ("receive: m == %p sbavail == %u", m, sbavail(&so->so_rcv))); if (so->so_error || so->so_rerror) { if (m != NULL) goto dontblock; if (so->so_error) error = so->so_error; else error = so->so_rerror; if ((flags & MSG_PEEK) == 0) { if (so->so_error) so->so_error = 0; else so->so_rerror = 0; } SOCKBUF_UNLOCK(&so->so_rcv); goto release; } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { if (m != NULL) goto dontblock; #ifdef KERN_TLS else if (so->so_rcv.sb_tlsdcc == 0 && so->so_rcv.sb_tlscc == 0) { #else else { #endif SOCKBUF_UNLOCK(&so->so_rcv); goto release; } } for (; m != NULL; m = m->m_next) if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { m = so->so_rcv.sb_mb; goto dontblock; } if ((so->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING | SS_ISDISCONNECTED)) == 0 && (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) { SOCKBUF_UNLOCK(&so->so_rcv); error = ENOTCONN; goto release; } if (uio->uio_resid == 0 && !report_real_len) { SOCKBUF_UNLOCK(&so->so_rcv); goto release; } if ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO))) { SOCKBUF_UNLOCK(&so->so_rcv); error = EWOULDBLOCK; goto release; } SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); error = sbwait(so, SO_RCV); SOCKBUF_UNLOCK(&so->so_rcv); if (error) goto release; goto restart; } dontblock: /* * From this point onward, we maintain 'nextrecord' as a cache of the * pointer to the next record in the socket buffer. We must keep the * various socket buffer pointers and local stack versions of the * pointers in sync, pushing out modifications before dropping the * socket buffer mutex, and re-reading them when picking it up. * * Otherwise, we will race with the network stack appending new data * or records onto the socket buffer by using inconsistent/stale * versions of the field, possibly resulting in socket buffer * corruption. * * By holding the high-level sblock(), we prevent simultaneous * readers from pulling off the front of the socket buffer. */ SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb")); SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); nextrecord = m->m_nextpkt; if (pr->pr_flags & PR_ADDR) { KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); orig_resid = 0; if (psa != NULL) *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_NOWAIT); if (flags & MSG_PEEK) { m = m->m_next; } else { sbfree(&so->so_rcv, m); so->so_rcv.sb_mb = m_free(m); m = so->so_rcv.sb_mb; sockbuf_pushsync(&so->so_rcv, nextrecord); } } /* * Process one or more MT_CONTROL mbufs present before any data mbufs * in the first mbuf chain on the socket buffer. If MSG_PEEK, we * just copy the data; if !MSG_PEEK, we call into the protocol to * perform externalization (or freeing if controlp == NULL). */ if (m != NULL && m->m_type == MT_CONTROL) { struct mbuf *cm = NULL, *cmn; struct mbuf **cme = &cm; #ifdef KERN_TLS struct cmsghdr *cmsg; struct tls_get_record tgr; /* * For MSG_TLSAPPDATA, check for an alert record. * If found, return ENXIO without removing * it from the receive queue. This allows a subsequent * call without MSG_TLSAPPDATA to receive it. 
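/*
 * Illustrative sketch (not part of the original patch): receiving data as a
 * single mbuf chain through the mp0 argument; per the soreceive_generic()
 * header comment, the uio then only supplies the count in uio_resid.  The
 * soreceive() wrapper and the 'maxlen' variable are assumed here.
 */
#if 0
	struct uio auio = { .uio_resid = maxlen };	/* only the count is used */
	struct mbuf *m = NULL;
	int flags = MSG_DONTWAIT;
	int error;

	error = soreceive(so, NULL, &auio, &m, NULL, &flags);
#endif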
* Note that, for TLS, there should only be a single * control mbuf with the TLS_GET_RECORD message in it. */ if (flags & MSG_TLSAPPDATA) { cmsg = mtod(m, struct cmsghdr *); if (cmsg->cmsg_type == TLS_GET_RECORD && cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) { memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr)); if (__predict_false(tgr.tls_type == TLS_RLTYPE_ALERT)) { SOCKBUF_UNLOCK(&so->so_rcv); error = ENXIO; goto release; } } } #endif do { if (flags & MSG_PEEK) { if (controlp != NULL) { *controlp = m_copym(m, 0, m->m_len, M_NOWAIT); controlp = &(*controlp)->m_next; } m = m->m_next; } else { sbfree(&so->so_rcv, m); so->so_rcv.sb_mb = m->m_next; m->m_next = NULL; *cme = m; cme = &(*cme)->m_next; m = so->so_rcv.sb_mb; } } while (m != NULL && m->m_type == MT_CONTROL); if ((flags & MSG_PEEK) == 0) sockbuf_pushsync(&so->so_rcv, nextrecord); while (cm != NULL) { cmn = cm->m_next; cm->m_next = NULL; if (pr->pr_domain->dom_externalize != NULL) { SOCKBUF_UNLOCK(&so->so_rcv); VNET_SO_ASSERT(so); error = (*pr->pr_domain->dom_externalize) (cm, controlp, flags); SOCKBUF_LOCK(&so->so_rcv); } else if (controlp != NULL) *controlp = cm; else m_freem(cm); if (controlp != NULL) { while (*controlp != NULL) controlp = &(*controlp)->m_next; } cm = cmn; } if (m != NULL) nextrecord = so->so_rcv.sb_mb->m_nextpkt; else nextrecord = so->so_rcv.sb_mb; orig_resid = 0; } if (m != NULL) { if ((flags & MSG_PEEK) == 0) { KASSERT(m->m_nextpkt == nextrecord, ("soreceive: post-control, nextrecord !sync")); if (nextrecord == NULL) { KASSERT(so->so_rcv.sb_mb == m, ("soreceive: post-control, sb_mb!=m")); KASSERT(so->so_rcv.sb_lastrecord == m, ("soreceive: post-control, lastrecord!=m")); } } type = m->m_type; if (type == MT_OOBDATA) flags |= MSG_OOB; } else { if ((flags & MSG_PEEK) == 0) { KASSERT(so->so_rcv.sb_mb == nextrecord, ("soreceive: sb_mb != nextrecord")); if (so->so_rcv.sb_mb == NULL) { KASSERT(so->so_rcv.sb_lastrecord == NULL, ("soreceive: sb_lastercord != NULL")); } } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); /* * Now continue to read any data mbufs off of the head of the socket * buffer until the read request is satisfied. Note that 'type' is * used to store the type of any mbuf reads that have happened so far * such that soreceive() can stop reading if the type changes, which * causes soreceive() to return only one of regular data and inline * out-of-band data in a single socket receive operation. */ moff = 0; offset = 0; while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0 && error == 0) { /* * If the type of mbuf has changed since the last mbuf * examined ('type'), end the receive operation. */ SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) { if (type != m->m_type) break; } else if (type == MT_OOBDATA) break; else KASSERT(m->m_type == MT_DATA, ("m->m_type == %d", m->m_type)); so->so_rcv.sb_state &= ~SBS_RCVATMARK; len = uio->uio_resid; if (so->so_oobmark && len > so->so_oobmark - offset) len = so->so_oobmark - offset; if (len > m->m_len - moff) len = m->m_len - moff; /* * If mp is set, just pass back the mbufs. Otherwise copy * them out via the uio, then free. Sockbuf must be * consistent here (points to current mbuf, it points to next * record) when we drop priority; we must note any additions * to the sockbuf when we block interrupts again. 
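 *
 * ("Drop priority" and "block interrupts" are historical spl-era wording;
 * in the current code this simply means dropping and reacquiring the
 * sockbuf mutex around the copy to user space.)
 *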
*/ if (mp == NULL) { SOCKBUF_LOCK_ASSERT(&so->so_rcv); SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); if ((m->m_flags & M_EXTPG) != 0) error = m_unmapped_uiomove(m, moff, uio, (int)len); else error = uiomove(mtod(m, char *) + moff, (int)len, uio); SOCKBUF_LOCK(&so->so_rcv); if (error) { /* * The MT_SONAME mbuf has already been removed * from the record, so it is necessary to * remove the data mbufs, if any, to preserve * the invariant in the case of PR_ADDR that * requires MT_SONAME mbufs at the head of * each record. */ if (pr->pr_flags & PR_ATOMIC && ((flags & MSG_PEEK) == 0)) (void)sbdroprecord_locked(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); goto release; } } else uio->uio_resid -= len; SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (len == m->m_len - moff) { if (m->m_flags & M_EOR) flags |= MSG_EOR; if (flags & MSG_PEEK) { m = m->m_next; moff = 0; } else { nextrecord = m->m_nextpkt; sbfree(&so->so_rcv, m); if (mp != NULL) { m->m_nextpkt = NULL; *mp = m; mp = &m->m_next; so->so_rcv.sb_mb = m = m->m_next; *mp = NULL; } else { so->so_rcv.sb_mb = m_free(m); m = so->so_rcv.sb_mb; } sockbuf_pushsync(&so->so_rcv, nextrecord); SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); } } else { if (flags & MSG_PEEK) moff += len; else { if (mp != NULL) { if (flags & MSG_DONTWAIT) { *mp = m_copym(m, 0, len, M_NOWAIT); if (*mp == NULL) { /* * m_copym() couldn't * allocate an mbuf. * Adjust uio_resid back * (it was adjusted * down by len bytes, * which we didn't end * up "copying" over). */ uio->uio_resid += len; break; } } else { SOCKBUF_UNLOCK(&so->so_rcv); *mp = m_copym(m, 0, len, M_WAITOK); SOCKBUF_LOCK(&so->so_rcv); } } sbcut_locked(&so->so_rcv, len); } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (so->so_oobmark) { if ((flags & MSG_PEEK) == 0) { so->so_oobmark -= len; if (so->so_oobmark == 0) { so->so_rcv.sb_state |= SBS_RCVATMARK; break; } } else { offset += len; if (offset == so->so_oobmark) break; } } if (flags & MSG_EOR) break; /* * If the MSG_WAITALL flag is set (for non-atomic socket), we * must not quit until "uio->uio_resid == 0" or an error * termination. If a signal/timeout occurs, return with a * short count but without error. Keep sockbuf locked * against other readers. */ while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && !sosendallatonce(so) && nextrecord == NULL) { SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (so->so_error || so->so_rerror || so->so_rcv.sb_state & SBS_CANTRCVMORE) break; /* * Notify the protocol that some data has been * drained before blocking. */ if (pr->pr_flags & PR_WANTRCVD) { SOCKBUF_UNLOCK(&so->so_rcv); VNET_SO_ASSERT(so); pr->pr_rcvd(so, flags); SOCKBUF_LOCK(&so->so_rcv); if (__predict_false(so->so_rcv.sb_mb == NULL && (so->so_error || so->so_rerror || so->so_rcv.sb_state & SBS_CANTRCVMORE))) break; } SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); /* * We could receive some data while was notifying * the protocol. Skip blocking in this case. */ if (so->so_rcv.sb_mb == NULL) { error = sbwait(so, SO_RCV); if (error) { SOCKBUF_UNLOCK(&so->so_rcv); goto release; } } m = so->so_rcv.sb_mb; if (m != NULL) nextrecord = m->m_nextpkt; } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (m != NULL && pr->pr_flags & PR_ATOMIC) { if (report_real_len) uio->uio_resid -= m_length(m, NULL) - moff; flags |= MSG_TRUNC; if ((flags & MSG_PEEK) == 0) (void) sbdroprecord_locked(&so->so_rcv); } if ((flags & MSG_PEEK) == 0) { if (m == NULL) { /* * First part is an inline SB_EMPTY_FIXUP(). 
Second * part makes sure sb_lastrecord is up-to-date if * there is still data in the socket buffer. */ so->so_rcv.sb_mb = nextrecord; if (so->so_rcv.sb_mb == NULL) { so->so_rcv.sb_mbtail = NULL; so->so_rcv.sb_lastrecord = NULL; } else if (nextrecord->m_nextpkt == NULL) so->so_rcv.sb_lastrecord = nextrecord; } SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); /* * If soreceive() is being done from the socket callback, * then don't need to generate ACK to peer to update window, * since ACK will be generated on return to TCP. */ if (!(flags & MSG_SOCALLBCK) && (pr->pr_flags & PR_WANTRCVD)) { SOCKBUF_UNLOCK(&so->so_rcv); VNET_SO_ASSERT(so); pr->pr_rcvd(so, flags); SOCKBUF_LOCK(&so->so_rcv); } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (orig_resid == uio->uio_resid && orig_resid && (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { SOCKBUF_UNLOCK(&so->so_rcv); goto restart; } SOCKBUF_UNLOCK(&so->so_rcv); if (flagsp != NULL) *flagsp |= flags; release: SOCK_IO_RECV_UNLOCK(so); return (error); } /* * Optimized version of soreceive() for stream (TCP) sockets. */ int soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { int len = 0, error = 0, flags, oresid; struct sockbuf *sb; struct mbuf *m, *n = NULL; /* We only do stream sockets. */ if (so->so_type != SOCK_STREAM) return (EINVAL); if (psa != NULL) *psa = NULL; if (flagsp != NULL) flags = *flagsp &~ MSG_EOR; else flags = 0; if (controlp != NULL) *controlp = NULL; if (flags & MSG_OOB) return (soreceive_rcvoob(so, uio, flags)); if (mp0 != NULL) *mp0 = NULL; sb = &so->so_rcv; #ifdef KERN_TLS /* * KTLS store TLS records as records with a control message to * describe the framing. * * We check once here before acquiring locks to optimize the * common case. */ if (sb->sb_tls_info != NULL) return (soreceive_generic(so, psa, uio, mp0, controlp, flagsp)); #endif /* Prevent other readers from entering the socket. */ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); if (error) return (error); SOCKBUF_LOCK(sb); #ifdef KERN_TLS if (sb->sb_tls_info != NULL) { SOCKBUF_UNLOCK(sb); SOCK_IO_RECV_UNLOCK(so); return (soreceive_generic(so, psa, uio, mp0, controlp, flagsp)); } #endif /* Easy one, no space to copyout anything. */ if (uio->uio_resid == 0) { error = EINVAL; goto out; } oresid = uio->uio_resid; /* We will never ever get anything unless we are or were connected. */ if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { error = ENOTCONN; goto out; } restart: SOCKBUF_LOCK_ASSERT(&so->so_rcv); /* Abort if socket has reported problems. */ if (so->so_error) { if (sbavail(sb) > 0) goto deliver; if (oresid > uio->uio_resid) goto out; error = so->so_error; if (!(flags & MSG_PEEK)) so->so_error = 0; goto out; } /* Door is closed. Deliver what is left, if any. */ if (sb->sb_state & SBS_CANTRCVMORE) { if (sbavail(sb) > 0) goto deliver; else goto out; } /* Socket buffer is empty and we shall not block. */ if (sbavail(sb) == 0 && ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) { error = EAGAIN; goto out; } /* Socket buffer got some data that we shall deliver now. */ if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) && ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)) || sbavail(sb) >= sb->sb_lowat || sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat) ) { goto deliver; } /* On MSG_WAITALL we must wait until all data or error arrives. 
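 *
 * Sketch of the semantics this implements, seen from the caller's side:
 *
 *	char buf[1024];
 *	ssize_t n = recv(s, buf, sizeof(buf), MSG_WAITALL);
 *
 * should return a short count only on EOF, a pending error, or an
 * interrupting signal; otherwise we keep waiting until enough data to
 * satisfy the request has arrived (or the buffer has filled to its
 * high-water mark).
 *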
*/ if ((flags & MSG_WAITALL) && (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat)) goto deliver; /* * Wait and block until (more) data comes in. * NB: Drops the sockbuf lock during wait. */ error = sbwait(so, SO_RCV); if (error) goto out; goto restart; deliver: SOCKBUF_LOCK_ASSERT(&so->so_rcv); KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__)); KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__)); /* Statistics. */ if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; /* Fill uio until full or current end of socket buffer is reached. */ len = min(uio->uio_resid, sbavail(sb)); if (mp0 != NULL) { /* Dequeue as many mbufs as possible. */ if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) { if (*mp0 == NULL) *mp0 = sb->sb_mb; else m_cat(*mp0, sb->sb_mb); for (m = sb->sb_mb; m != NULL && m->m_len <= len; m = m->m_next) { KASSERT(!(m->m_flags & M_NOTAVAIL), ("%s: m %p not available", __func__, m)); len -= m->m_len; uio->uio_resid -= m->m_len; sbfree(sb, m); n = m; } n->m_next = NULL; sb->sb_mb = m; sb->sb_lastrecord = sb->sb_mb; if (sb->sb_mb == NULL) SB_EMPTY_FIXUP(sb); } /* Copy the remainder. */ if (len > 0) { KASSERT(sb->sb_mb != NULL, ("%s: len > 0 && sb->sb_mb empty", __func__)); m = m_copym(sb->sb_mb, 0, len, M_NOWAIT); if (m == NULL) len = 0; /* Don't flush data from sockbuf. */ else uio->uio_resid -= len; if (*mp0 != NULL) m_cat(*mp0, m); else *mp0 = m; if (*mp0 == NULL) { error = ENOBUFS; goto out; } } } else { /* NB: Must unlock socket buffer as uiomove may sleep. */ SOCKBUF_UNLOCK(sb); error = m_mbuftouio(uio, sb->sb_mb, len); SOCKBUF_LOCK(sb); if (error) goto out; } SBLASTRECORDCHK(sb); SBLASTMBUFCHK(sb); /* * Remove the delivered data from the socket buffer unless we * were only peeking. */ if (!(flags & MSG_PEEK)) { if (len > 0) sbdrop_locked(sb, len); /* Notify protocol that we drained some data. */ if ((so->so_proto->pr_flags & PR_WANTRCVD) && (((flags & MSG_WAITALL) && uio->uio_resid > 0) || !(flags & MSG_SOCALLBCK))) { SOCKBUF_UNLOCK(sb); VNET_SO_ASSERT(so); so->so_proto->pr_rcvd(so, flags); SOCKBUF_LOCK(sb); } } /* * For MSG_WAITALL we may have to loop again and wait for * more data to come in. */ if ((flags & MSG_WAITALL) && uio->uio_resid > 0) goto restart; out: SBLASTRECORDCHK(sb); SBLASTMBUFCHK(sb); SOCKBUF_UNLOCK(sb); SOCK_IO_RECV_UNLOCK(so); return (error); } /* * Optimized version of soreceive() for simple datagram cases from userspace. * Unlike in the stream case, we're able to drop a datagram if copyout() * fails, and because we handle datagrams atomically, we don't need to use a * sleep lock to prevent I/O interlacing. */ int soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { struct mbuf *m, *m2; int flags, error; ssize_t len; struct protosw *pr = so->so_proto; struct mbuf *nextrecord; if (psa != NULL) *psa = NULL; if (controlp != NULL) *controlp = NULL; if (flagsp != NULL) flags = *flagsp &~ MSG_EOR; else flags = 0; /* * For any complicated cases, fall back to the full * soreceive_generic(). */ if (mp0 != NULL || (flags & (MSG_PEEK | MSG_OOB | MSG_TRUNC))) return (soreceive_generic(so, psa, uio, mp0, controlp, flagsp)); /* * Enforce restrictions on use. 
*/ KASSERT((pr->pr_flags & PR_WANTRCVD) == 0, ("soreceive_dgram: wantrcvd")); KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic")); KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0, ("soreceive_dgram: SBS_RCVATMARK")); KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0, ("soreceive_dgram: P_CONNREQUIRED")); /* * Loop blocking while waiting for a datagram. */ SOCKBUF_LOCK(&so->so_rcv); while ((m = so->so_rcv.sb_mb) == NULL) { KASSERT(sbavail(&so->so_rcv) == 0, ("soreceive_dgram: sb_mb NULL but sbavail %u", sbavail(&so->so_rcv))); if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_rcv); return (error); } if (so->so_rcv.sb_state & SBS_CANTRCVMORE || uio->uio_resid == 0) { SOCKBUF_UNLOCK(&so->so_rcv); return (0); } if ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO))) { SOCKBUF_UNLOCK(&so->so_rcv); return (EWOULDBLOCK); } SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); error = sbwait(so, SO_RCV); if (error) { SOCKBUF_UNLOCK(&so->so_rcv); return (error); } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); nextrecord = m->m_nextpkt; if (nextrecord == NULL) { KASSERT(so->so_rcv.sb_lastrecord == m, ("soreceive_dgram: lastrecord != m")); } KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord, ("soreceive_dgram: m_nextpkt != nextrecord")); /* * Pull 'm' and its chain off the front of the packet queue. */ so->so_rcv.sb_mb = NULL; sockbuf_pushsync(&so->so_rcv, nextrecord); /* * Walk 'm's chain and free that many bytes from the socket buffer. */ for (m2 = m; m2 != NULL; m2 = m2->m_next) sbfree(&so->so_rcv, m2); /* * Do a few last checks before we let go of the lock. */ SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); if (pr->pr_flags & PR_ADDR) { KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); if (psa != NULL) *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_NOWAIT); m = m_free(m); } if (m == NULL) { /* XXXRW: Can this happen? */ return (0); } /* * Packet to copyout() is now in 'm' and it is disconnected from the * queue. * * Process one or more MT_CONTROL mbufs present before any data mbufs * in the first mbuf chain on the socket buffer. We call into the * protocol to perform externalization (or freeing if controlp == * NULL). In some cases there can be only MT_CONTROL mbufs without * MT_DATA mbufs. 
*/ if (m->m_type == MT_CONTROL) { struct mbuf *cm = NULL, *cmn; struct mbuf **cme = &cm; do { m2 = m->m_next; m->m_next = NULL; *cme = m; cme = &(*cme)->m_next; m = m2; } while (m != NULL && m->m_type == MT_CONTROL); while (cm != NULL) { cmn = cm->m_next; cm->m_next = NULL; if (pr->pr_domain->dom_externalize != NULL) { error = (*pr->pr_domain->dom_externalize) (cm, controlp, flags); } else if (controlp != NULL) *controlp = cm; else m_freem(cm); if (controlp != NULL) { while (*controlp != NULL) controlp = &(*controlp)->m_next; } cm = cmn; } } KASSERT(m == NULL || m->m_type == MT_DATA, ("soreceive_dgram: !data")); while (m != NULL && uio->uio_resid > 0) { len = uio->uio_resid; if (len > m->m_len) len = m->m_len; error = uiomove(mtod(m, char *), (int)len, uio); if (error) { m_freem(m); return (error); } if (len == m->m_len) m = m_free(m); else { m->m_data += len; m->m_len -= len; } } if (m != NULL) { flags |= MSG_TRUNC; m_freem(m); } if (flagsp != NULL) *flagsp |= flags; return (0); } int soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { int error; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_soreceive(so, psa, uio, mp0, controlp, flagsp); CURVNET_RESTORE(); return (error); } int soshutdown(struct socket *so, int how) { struct protosw *pr; int error, soerror_enotconn; if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) return (EINVAL); soerror_enotconn = 0; SOCK_LOCK(so); if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { /* * POSIX mandates us to return ENOTCONN when shutdown(2) is * invoked on a datagram sockets, however historically we would * actually tear socket down. This is known to be leveraged by * some applications to unblock process waiting in recvXXX(2) * by other process that it shares that socket with. Try to meet * both backward-compatibility and POSIX requirements by forcing * ENOTCONN but still asking protocol to perform pru_shutdown(). */ if (so->so_type != SOCK_DGRAM && !SOLISTENING(so)) { SOCK_UNLOCK(so); return (ENOTCONN); } soerror_enotconn = 1; } if (SOLISTENING(so)) { if (how != SHUT_WR) { so->so_error = ECONNABORTED; solisten_wakeup(so); /* unlocks so */ } else { SOCK_UNLOCK(so); } goto done; } SOCK_UNLOCK(so); CURVNET_SET(so->so_vnet); pr = so->so_proto; if (pr->pr_flush != NULL) pr->pr_flush(so, how); if (how != SHUT_WR) sorflush(so); if (how != SHUT_RD) { error = pr->pr_shutdown(so); wakeup(&so->so_timeo); CURVNET_RESTORE(); return ((error == 0 && soerror_enotconn) ? ENOTCONN : error); } wakeup(&so->so_timeo); CURVNET_RESTORE(); done: return (soerror_enotconn ? ENOTCONN : 0); } void sorflush(struct socket *so) { struct protosw *pr; int error; VNET_SO_ASSERT(so); /* * Dislodge threads currently blocked in receive and wait to acquire * a lock against other simultaneous readers before clearing the * socket buffer. Don't let our acquire be interrupted by a signal * despite any existing socket disposition on interruptable waiting. */ socantrcvmore(so); error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR); if (error != 0) { KASSERT(SOLISTENING(so), ("%s: soiolock(%p) failed", __func__, so)); return; } pr = so->so_proto; if (pr->pr_flags & PR_RIGHTS) { MPASS(pr->pr_domain->dom_dispose != NULL); (*pr->pr_domain->dom_dispose)(so); } else { sbrelease(so, SO_RCV); SOCK_IO_RECV_UNLOCK(so); } } /* * Wrapper for Socket established helper hook. * Parameters: socket, context of the hook point, hook id. 
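 *
 * Returns the status reported by the hook(s), or 0 when none ran; note
 * that the callers interpret it differently: sosetopt()/sogetopt() treat
 * a non-zero status as an errno, while filt_soread() treats it as
 * "event pending".
 *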
*/ static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id) { struct socket_hhook_data hhook_data = { .so = so, .hctx = hctx, .m = NULL, .status = 0 }; CURVNET_SET(so->so_vnet); HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd); CURVNET_RESTORE(); /* Ugly but needed, since hhooks return void for now */ return (hhook_data.status); } /* * Perhaps this routine, and sooptcopyout(), below, ought to come in an * additional variant to handle the case where the option value needs to be * some kind of integer, but not a specific size. In addition to their use * here, these functions are also called by the protocol-level pr_ctloutput() * routines. */ int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) { size_t valsize; /* * If the user gives us more than we wanted, we ignore it, but if we * don't get the minimum length the caller wants, we return EINVAL. * On success, sopt->sopt_valsize is set to however much we actually * retrieved. */ if ((valsize = sopt->sopt_valsize) < minlen) return EINVAL; if (valsize > len) sopt->sopt_valsize = valsize = len; if (sopt->sopt_td != NULL) return (copyin(sopt->sopt_val, buf, valsize)); bcopy(sopt->sopt_val, buf, valsize); return (0); } /* * Kernel version of setsockopt(2). * * XXX: optlen is size_t, not socklen_t */ int so_setsockopt(struct socket *so, int level, int optname, void *optval, size_t optlen) { struct sockopt sopt; sopt.sopt_level = level; sopt.sopt_name = optname; sopt.sopt_dir = SOPT_SET; sopt.sopt_val = optval; sopt.sopt_valsize = optlen; sopt.sopt_td = NULL; return (sosetopt(so, &sopt)); } int sosetopt(struct socket *so, struct sockopt *sopt) { int error, optval; struct linger l; struct timeval tv; sbintime_t val, *valp; uint32_t val32; #ifdef MAC struct mac extmac; #endif CURVNET_SET(so->so_vnet); error = 0; if (sopt->sopt_level != SOL_SOCKET) { if (so->so_proto->pr_ctloutput != NULL) error = (*so->so_proto->pr_ctloutput)(so, sopt); else error = ENOPROTOOPT; } else { switch (sopt->sopt_name) { case SO_ACCEPTFILTER: error = accept_filt_setopt(so, sopt); if (error) goto bad; break; case SO_LINGER: error = sooptcopyin(sopt, &l, sizeof l, sizeof l); if (error) goto bad; if (l.l_linger < 0 || l.l_linger > USHRT_MAX || l.l_linger > (INT_MAX / hz)) { error = EDOM; goto bad; } SOCK_LOCK(so); so->so_linger = l.l_linger; if (l.l_onoff) so->so_options |= SO_LINGER; else so->so_options &= ~SO_LINGER; SOCK_UNLOCK(so); break; case SO_DEBUG: case SO_KEEPALIVE: case SO_DONTROUTE: case SO_USELOOPBACK: case SO_BROADCAST: case SO_REUSEADDR: case SO_REUSEPORT: case SO_REUSEPORT_LB: case SO_OOBINLINE: case SO_TIMESTAMP: case SO_BINTIME: case SO_NOSIGPIPE: case SO_NO_DDP: case SO_NO_OFFLOAD: case SO_RERROR: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) goto bad; SOCK_LOCK(so); if (optval) so->so_options |= sopt->sopt_name; else so->so_options &= ~sopt->sopt_name; SOCK_UNLOCK(so); break; case SO_SETFIB: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) goto bad; if (optval < 0 || optval >= rt_numfibs) { error = EINVAL; goto bad; } if (((so->so_proto->pr_domain->dom_family == PF_INET) || (so->so_proto->pr_domain->dom_family == PF_INET6) || (so->so_proto->pr_domain->dom_family == PF_ROUTE))) so->so_fibnum = optval; else so->so_fibnum = 0; break; case SO_USER_COOKIE: error = sooptcopyin(sopt, &val32, sizeof val32, sizeof val32); if (error) goto bad; so->so_user_cookie = val32; break; case SO_SNDBUF: case SO_RCVBUF: case SO_SNDLOWAT: case SO_RCVLOWAT: error = 
so->so_proto->pr_setsbopt(so, sopt); if (error) goto bad; break; case SO_SNDTIMEO: case SO_RCVTIMEO: #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) { struct timeval32 tv32; error = sooptcopyin(sopt, &tv32, sizeof tv32, sizeof tv32); CP(tv32, tv, tv_sec); CP(tv32, tv, tv_usec); } else #endif error = sooptcopyin(sopt, &tv, sizeof tv, sizeof tv); if (error) goto bad; if (tv.tv_sec < 0 || tv.tv_usec < 0 || tv.tv_usec >= 1000000) { error = EDOM; goto bad; } if (tv.tv_sec > INT32_MAX) val = SBT_MAX; else val = tvtosbt(tv); SOCK_LOCK(so); valp = sopt->sopt_name == SO_SNDTIMEO ? (SOLISTENING(so) ? &so->sol_sbsnd_timeo : &so->so_snd.sb_timeo) : (SOLISTENING(so) ? &so->sol_sbrcv_timeo : &so->so_rcv.sb_timeo); *valp = val; SOCK_UNLOCK(so); break; case SO_LABEL: #ifdef MAC error = sooptcopyin(sopt, &extmac, sizeof extmac, sizeof extmac); if (error) goto bad; error = mac_setsockopt_label(sopt->sopt_td->td_ucred, so, &extmac); #else error = EOPNOTSUPP; #endif break; case SO_TS_CLOCK: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) goto bad; if (optval < 0 || optval > SO_TS_CLOCK_MAX) { error = EINVAL; goto bad; } so->so_ts_clock = optval; break; case SO_MAX_PACING_RATE: error = sooptcopyin(sopt, &val32, sizeof(val32), sizeof(val32)); if (error) goto bad; so->so_max_pacing_rate = val32; break; default: if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0) error = hhook_run_socket(so, sopt, HHOOK_SOCKET_OPT); else error = ENOPROTOOPT; break; } if (error == 0 && so->so_proto->pr_ctloutput != NULL) (void)(*so->so_proto->pr_ctloutput)(so, sopt); } bad: CURVNET_RESTORE(); return (error); } /* * Helper routine for getsockopt. */ int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) { int error; size_t valsize; error = 0; /* * Documented get behavior is that we always return a value, possibly * truncated to fit in the user's buffer. Traditional behavior is * that we always tell the user precisely how much we copied, rather * than something useful like the total amount we had available for * her. Note that this interface is not idempotent; the entire * answer must be generated ahead of time. 
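 *
 * Userland illustration of the truncation rule (sketch only): after
 *
 *	int val;
 *	socklen_t optlen = 1;
 *	getsockopt(s, SOL_SOCKET, SO_TYPE, &val, &optlen);
 *
 * only a single byte of the answer has been copied out and optlen comes
 * back as 1, not as the size that was actually available.
 *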
*/ valsize = min(len, sopt->sopt_valsize); sopt->sopt_valsize = valsize; if (sopt->sopt_val != NULL) { if (sopt->sopt_td != NULL) error = copyout(buf, sopt->sopt_val, valsize); else bcopy(buf, sopt->sopt_val, valsize); } return (error); } int sogetopt(struct socket *so, struct sockopt *sopt) { int error, optval; struct linger l; struct timeval tv; #ifdef MAC struct mac extmac; #endif CURVNET_SET(so->so_vnet); error = 0; if (sopt->sopt_level != SOL_SOCKET) { if (so->so_proto->pr_ctloutput != NULL) error = (*so->so_proto->pr_ctloutput)(so, sopt); else error = ENOPROTOOPT; CURVNET_RESTORE(); return (error); } else { switch (sopt->sopt_name) { case SO_ACCEPTFILTER: error = accept_filt_getopt(so, sopt); break; case SO_LINGER: SOCK_LOCK(so); l.l_onoff = so->so_options & SO_LINGER; l.l_linger = so->so_linger; SOCK_UNLOCK(so); error = sooptcopyout(sopt, &l, sizeof l); break; case SO_USELOOPBACK: case SO_DONTROUTE: case SO_DEBUG: case SO_KEEPALIVE: case SO_REUSEADDR: case SO_REUSEPORT: case SO_REUSEPORT_LB: case SO_BROADCAST: case SO_OOBINLINE: case SO_ACCEPTCONN: case SO_TIMESTAMP: case SO_BINTIME: case SO_NOSIGPIPE: case SO_NO_DDP: case SO_NO_OFFLOAD: case SO_RERROR: optval = so->so_options & sopt->sopt_name; integer: error = sooptcopyout(sopt, &optval, sizeof optval); break; case SO_DOMAIN: optval = so->so_proto->pr_domain->dom_family; goto integer; case SO_TYPE: optval = so->so_type; goto integer; case SO_PROTOCOL: optval = so->so_proto->pr_protocol; goto integer; case SO_ERROR: SOCK_LOCK(so); if (so->so_error) { optval = so->so_error; so->so_error = 0; } else { optval = so->so_rerror; so->so_rerror = 0; } SOCK_UNLOCK(so); goto integer; case SO_SNDBUF: optval = SOLISTENING(so) ? so->sol_sbsnd_hiwat : so->so_snd.sb_hiwat; goto integer; case SO_RCVBUF: optval = SOLISTENING(so) ? so->sol_sbrcv_hiwat : so->so_rcv.sb_hiwat; goto integer; case SO_SNDLOWAT: optval = SOLISTENING(so) ? so->sol_sbsnd_lowat : so->so_snd.sb_lowat; goto integer; case SO_RCVLOWAT: optval = SOLISTENING(so) ? so->sol_sbrcv_lowat : so->so_rcv.sb_lowat; goto integer; case SO_SNDTIMEO: case SO_RCVTIMEO: SOCK_LOCK(so); tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ? (SOLISTENING(so) ? so->sol_sbsnd_timeo : so->so_snd.sb_timeo) : (SOLISTENING(so) ? so->sol_sbrcv_timeo : so->so_rcv.sb_timeo)); SOCK_UNLOCK(so); #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) { struct timeval32 tv32; CP(tv, tv32, tv_sec); CP(tv, tv32, tv_usec); error = sooptcopyout(sopt, &tv32, sizeof tv32); } else #endif error = sooptcopyout(sopt, &tv, sizeof tv); break; case SO_LABEL: #ifdef MAC error = sooptcopyin(sopt, &extmac, sizeof(extmac), sizeof(extmac)); if (error) goto bad; error = mac_getsockopt_label(sopt->sopt_td->td_ucred, so, &extmac); if (error) goto bad; /* Don't copy out extmac, it is unchanged. */ #else error = EOPNOTSUPP; #endif break; case SO_PEERLABEL: #ifdef MAC error = sooptcopyin(sopt, &extmac, sizeof(extmac), sizeof(extmac)); if (error) goto bad; error = mac_getsockopt_peerlabel( sopt->sopt_td->td_ucred, so, &extmac); if (error) goto bad; /* Don't copy out extmac, it is unchanged. */ #else error = EOPNOTSUPP; #endif break; case SO_LISTENQLIMIT: optval = SOLISTENING(so) ? so->sol_qlimit : 0; goto integer; case SO_LISTENQLEN: optval = SOLISTENING(so) ? so->sol_qlen : 0; goto integer; case SO_LISTENINCQLEN: optval = SOLISTENING(so) ? 
so->sol_incqlen : 0; goto integer; case SO_TS_CLOCK: optval = so->so_ts_clock; goto integer; case SO_MAX_PACING_RATE: optval = so->so_max_pacing_rate; goto integer; default: if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0) error = hhook_run_socket(so, sopt, HHOOK_SOCKET_OPT); else error = ENOPROTOOPT; break; } } #ifdef MAC bad: #endif CURVNET_RESTORE(); return (error); } int soopt_getm(struct sockopt *sopt, struct mbuf **mp) { struct mbuf *m, *m_prev; int sopt_size = sopt->sopt_valsize; MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA); if (m == NULL) return ENOBUFS; if (sopt_size > MLEN) { MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT); if ((m->m_flags & M_EXT) == 0) { m_free(m); return ENOBUFS; } m->m_len = min(MCLBYTES, sopt_size); } else { m->m_len = min(MLEN, sopt_size); } sopt_size -= m->m_len; *mp = m; m_prev = m; while (sopt_size) { MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA); if (m == NULL) { m_freem(*mp); return ENOBUFS; } if (sopt_size > MLEN) { MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK : M_NOWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); m_freem(*mp); return ENOBUFS; } m->m_len = min(MCLBYTES, sopt_size); } else { m->m_len = min(MLEN, sopt_size); } sopt_size -= m->m_len; m_prev->m_next = m; m_prev = m; } return (0); } int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) { struct mbuf *m0 = m; if (sopt->sopt_val == NULL) return (0); while (m != NULL && sopt->sopt_valsize >= m->m_len) { if (sopt->sopt_td != NULL) { int error; error = copyin(sopt->sopt_val, mtod(m, char *), m->m_len); if (error != 0) { m_freem(m0); return(error); } } else bcopy(sopt->sopt_val, mtod(m, char *), m->m_len); sopt->sopt_valsize -= m->m_len; sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; m = m->m_next; } if (m != NULL) /* should be allocated enoughly at ip6_sooptmcopyin() */ panic("ip6_sooptmcopyin"); return (0); } int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) { struct mbuf *m0 = m; size_t valsize = 0; if (sopt->sopt_val == NULL) return (0); while (m != NULL && sopt->sopt_valsize >= m->m_len) { if (sopt->sopt_td != NULL) { int error; error = copyout(mtod(m, char *), sopt->sopt_val, m->m_len); if (error != 0) { m_freem(m0); return(error); } } else bcopy(mtod(m, char *), sopt->sopt_val, m->m_len); sopt->sopt_valsize -= m->m_len; sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; valsize += m->m_len; m = m->m_next; } if (m != NULL) { /* enough soopt buffer should be given from user-land */ m_freem(m0); return(EINVAL); } sopt->sopt_valsize = valsize; return (0); } /* * sohasoutofband(): protocol notifies socket layer of the arrival of new * out-of-band data, which will then notify socket consumers. */ void sohasoutofband(struct socket *so) { if (so->so_sigio != NULL) pgsigio(&so->so_sigio, SIGURG, 0); selwakeuppri(&so->so_rdsel, PSOCK); } int sopoll(struct socket *so, int events, struct ucred *active_cred, struct thread *td) { /* * We do not need to set or assert curvnet as long as everyone uses * sopoll_generic(). 
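 *
 * (sopoll_generic() below only inspects socket state under the socket
 * and buffer locks and records the selector; it performs no VNET-scoped
 * work, which is why no curvnet handling is needed here.)
 *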
*/ return (so->so_proto->pr_sopoll(so, events, active_cred, td)); } int sopoll_generic(struct socket *so, int events, struct ucred *active_cred, struct thread *td) { int revents; SOCK_LOCK(so); if (SOLISTENING(so)) { if (!(events & (POLLIN | POLLRDNORM))) revents = 0; else if (!TAILQ_EMPTY(&so->sol_comp)) revents = events & (POLLIN | POLLRDNORM); else if ((events & POLLINIGNEOF) == 0 && so->so_error) revents = (events & (POLLIN | POLLRDNORM)) | POLLHUP; else { selrecord(td, &so->so_rdsel); revents = 0; } } else { revents = 0; SOCK_SENDBUF_LOCK(so); SOCK_RECVBUF_LOCK(so); if (events & (POLLIN | POLLRDNORM)) if (soreadabledata(so)) revents |= events & (POLLIN | POLLRDNORM); if (events & (POLLOUT | POLLWRNORM)) if (sowriteable(so)) revents |= events & (POLLOUT | POLLWRNORM); if (events & (POLLPRI | POLLRDBAND)) if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK)) revents |= events & (POLLPRI | POLLRDBAND); if ((events & POLLINIGNEOF) == 0) { if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { revents |= events & (POLLIN | POLLRDNORM); if (so->so_snd.sb_state & SBS_CANTSENDMORE) revents |= POLLHUP; } } if (so->so_rcv.sb_state & SBS_CANTRCVMORE) revents |= events & POLLRDHUP; if (revents == 0) { if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND | POLLRDHUP)) { selrecord(td, &so->so_rdsel); so->so_rcv.sb_flags |= SB_SEL; } if (events & (POLLOUT | POLLWRNORM)) { selrecord(td, &so->so_wrsel); so->so_snd.sb_flags |= SB_SEL; } } SOCK_RECVBUF_UNLOCK(so); SOCK_SENDBUF_UNLOCK(so); } SOCK_UNLOCK(so); return (revents); } int soo_kqfilter(struct file *fp, struct knote *kn) { struct socket *so = kn->kn_fp->f_data; struct sockbuf *sb; sb_which which; struct knlist *knl; switch (kn->kn_filter) { case EVFILT_READ: kn->kn_fop = &soread_filtops; knl = &so->so_rdsel.si_note; sb = &so->so_rcv; which = SO_RCV; break; case EVFILT_WRITE: kn->kn_fop = &sowrite_filtops; knl = &so->so_wrsel.si_note; sb = &so->so_snd; which = SO_SND; break; case EVFILT_EMPTY: kn->kn_fop = &soempty_filtops; knl = &so->so_wrsel.si_note; sb = &so->so_snd; which = SO_SND; break; default: return (EINVAL); } SOCK_LOCK(so); if (SOLISTENING(so)) { knlist_add(knl, kn, 1); } else { SOCK_BUF_LOCK(so, which); knlist_add(knl, kn, 1); sb->sb_flags |= SB_KNOTE; SOCK_BUF_UNLOCK(so, which); } SOCK_UNLOCK(so); return (0); } static void filt_sordetach(struct knote *kn) { struct socket *so = kn->kn_fp->f_data; so_rdknl_lock(so); knlist_remove(&so->so_rdsel.si_note, kn, 1); if (!SOLISTENING(so) && knlist_empty(&so->so_rdsel.si_note)) so->so_rcv.sb_flags &= ~SB_KNOTE; so_rdknl_unlock(so); } /*ARGSUSED*/ static int filt_soread(struct knote *kn, long hint) { struct socket *so; so = kn->kn_fp->f_data; if (SOLISTENING(so)) { SOCK_LOCK_ASSERT(so); kn->kn_data = so->sol_qlen; if (so->so_error) { kn->kn_flags |= EV_EOF; kn->kn_fflags = so->so_error; return (1); } return (!TAILQ_EMPTY(&so->sol_comp)); } SOCK_RECVBUF_LOCK_ASSERT(so); kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl; if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { kn->kn_flags |= EV_EOF; kn->kn_fflags = so->so_error; return (1); } else if (so->so_error || so->so_rerror) return (1); if (kn->kn_sfflags & NOTE_LOWAT) { if (kn->kn_data >= kn->kn_sdata) return (1); } else if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat) return (1); /* This hook returning non-zero indicates an event, not error */ return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD)); } static void filt_sowdetach(struct knote *kn) { struct socket *so = kn->kn_fp->f_data; so_wrknl_lock(so); knlist_remove(&so->so_wrsel.si_note, kn, 
1); if (!SOLISTENING(so) && knlist_empty(&so->so_wrsel.si_note)) so->so_snd.sb_flags &= ~SB_KNOTE; so_wrknl_unlock(so); } /*ARGSUSED*/ static int filt_sowrite(struct knote *kn, long hint) { struct socket *so; so = kn->kn_fp->f_data; if (SOLISTENING(so)) return (0); SOCK_SENDBUF_LOCK_ASSERT(so); kn->kn_data = sbspace(&so->so_snd); hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { kn->kn_flags |= EV_EOF; kn->kn_fflags = so->so_error; return (1); } else if (so->so_error) /* temporary udp error */ return (1); else if (((so->so_state & SS_ISCONNECTED) == 0) && (so->so_proto->pr_flags & PR_CONNREQUIRED)) return (0); else if (kn->kn_sfflags & NOTE_LOWAT) return (kn->kn_data >= kn->kn_sdata); else return (kn->kn_data >= so->so_snd.sb_lowat); } static int filt_soempty(struct knote *kn, long hint) { struct socket *so; so = kn->kn_fp->f_data; if (SOLISTENING(so)) return (1); SOCK_SENDBUF_LOCK_ASSERT(so); kn->kn_data = sbused(&so->so_snd); if (kn->kn_data == 0) return (1); else return (0); } int socheckuid(struct socket *so, uid_t uid) { if (so == NULL) return (EPERM); if (so->so_cred->cr_uid != uid) return (EPERM); return (0); } /* * These functions are used by protocols to notify the socket layer (and its * consumers) of state changes in the sockets driven by protocol-side events. */ /* * Procedures to manipulate state flags of socket and do appropriate wakeups. * * Normal sequence from the active (originating) side is that * soisconnecting() is called during processing of connect() call, resulting * in an eventual call to soisconnected() if/when the connection is * established. When the connection is torn down soisdisconnecting() is * called during processing of disconnect() call, and soisdisconnected() is * called when the connection to the peer is totally severed. The semantics * of these routines are such that connectionless protocols can call * soisconnected() and soisdisconnected() only, bypassing the in-progress * calls when setting up a ``connection'' takes no time. * * From the passive side, a socket is created with two queues of sockets: * so_incomp for connections in progress and so_comp for connections already * made and awaiting user acceptance. As a protocol is preparing incoming * connections, it creates a socket structure queued on so_incomp by calling * sonewconn(). When the connection is established, soisconnected() is * called, and transfers the socket structure to so_comp, making it available * to accept(). * * If a socket is closed with sockets on either so_incomp or so_comp, these * sockets are dropped. * * If higher-level protocols are implemented in the kernel, the wakeups done * here will sometimes cause software-interrupt process scheduling. */ void soisconnecting(struct socket *so) { SOCK_LOCK(so); so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); so->so_state |= SS_ISCONNECTING; SOCK_UNLOCK(so); } void soisconnected(struct socket *so) { bool last __diagused; SOCK_LOCK(so); so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); so->so_state |= SS_ISCONNECTED; if (so->so_qstate == SQ_INCOMP) { struct socket *head = so->so_listen; int ret; KASSERT(head, ("%s: so %p on incomp of NULL", __func__, so)); /* * Promoting a socket from incomplete queue to complete, we * need to go through reverse order of locking. We first do * trylock, and if that doesn't succeed, we go the hard way * leaving a reference and rechecking consistency after proper * locking. 
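 *
 * In outline: the natural lock order is listener first, then child, but
 * we arrive here holding only the child's lock.  So we try-lock the
 * listener; if that fails we take a reference on it, drop and retake
 * both locks in the proper order, and re-verify that so->so_listen still
 * points at the same listener before continuing.
 *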
*/ if (__predict_false(SOLISTEN_TRYLOCK(head) == 0)) { soref(head); SOCK_UNLOCK(so); SOLISTEN_LOCK(head); SOCK_LOCK(so); if (__predict_false(head != so->so_listen)) { /* * The socket went off the listen queue, * should be lost race to close(2) of sol. * The socket is about to soabort(). */ SOCK_UNLOCK(so); sorele_locked(head); return; } last = refcount_release(&head->so_count); KASSERT(!last, ("%s: released last reference for %p", __func__, head)); } again: if ((so->so_options & SO_ACCEPTFILTER) == 0) { TAILQ_REMOVE(&head->sol_incomp, so, so_list); head->sol_incqlen--; TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list); head->sol_qlen++; so->so_qstate = SQ_COMP; SOCK_UNLOCK(so); solisten_wakeup(head); /* unlocks */ } else { SOCK_RECVBUF_LOCK(so); soupcall_set(so, SO_RCV, head->sol_accept_filter->accf_callback, head->sol_accept_filter_arg); so->so_options &= ~SO_ACCEPTFILTER; ret = head->sol_accept_filter->accf_callback(so, head->sol_accept_filter_arg, M_NOWAIT); if (ret == SU_ISCONNECTED) { soupcall_clear(so, SO_RCV); SOCK_RECVBUF_UNLOCK(so); goto again; } SOCK_RECVBUF_UNLOCK(so); SOCK_UNLOCK(so); SOLISTEN_UNLOCK(head); } return; } SOCK_UNLOCK(so); wakeup(&so->so_timeo); sorwakeup(so); sowwakeup(so); } void soisdisconnecting(struct socket *so) { SOCK_LOCK(so); so->so_state &= ~SS_ISCONNECTING; so->so_state |= SS_ISDISCONNECTING; if (!SOLISTENING(so)) { SOCK_RECVBUF_LOCK(so); socantrcvmore_locked(so); SOCK_SENDBUF_LOCK(so); socantsendmore_locked(so); } SOCK_UNLOCK(so); wakeup(&so->so_timeo); } void soisdisconnected(struct socket *so) { SOCK_LOCK(so); /* * There is at least one reader of so_state that does not * acquire socket lock, namely soreceive_generic(). Ensure * that it never sees all flags that track connection status * cleared, by ordering the update with a barrier semantic of * our release thread fence. */ so->so_state |= SS_ISDISCONNECTED; atomic_thread_fence_rel(); so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); if (!SOLISTENING(so)) { SOCK_UNLOCK(so); SOCK_RECVBUF_LOCK(so); socantrcvmore_locked(so); SOCK_SENDBUF_LOCK(so); sbdrop_locked(&so->so_snd, sbused(&so->so_snd)); socantsendmore_locked(so); } else SOCK_UNLOCK(so); wakeup(&so->so_timeo); } int soiolock(struct socket *so, struct sx *sx, int flags) { int error; KASSERT((flags & SBL_VALID) == flags, ("soiolock: invalid flags %#x", flags)); if ((flags & SBL_WAIT) != 0) { if ((flags & SBL_NOINTR) != 0) { sx_xlock(sx); } else { error = sx_xlock_sig(sx); if (error != 0) return (error); } } else if (!sx_try_xlock(sx)) { return (EWOULDBLOCK); } if (__predict_false(SOLISTENING(so))) { sx_xunlock(sx); return (ENOTCONN); } return (0); } void soiounlock(struct sx *sx) { sx_xunlock(sx); } /* * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. */ struct sockaddr * sodupsockaddr(const struct sockaddr *sa, int mflags) { struct sockaddr *sa2; sa2 = malloc(sa->sa_len, M_SONAME, mflags); if (sa2) bcopy(sa, sa2, sa->sa_len); return sa2; } /* * Register per-socket destructor. */ void sodtor_set(struct socket *so, so_dtor_t *func) { SOCK_LOCK_ASSERT(so); so->so_dtor = func; } /* * Register per-socket buffer upcalls. 
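 *
 * The matching buffer lock must be held across the call; a hypothetical
 * consumer would do something like
 *
 *	SOCK_RECVBUF_LOCK(so);
 *	soupcall_set(so, SO_RCV, my_rx_upcall, my_arg);
 *	SOCK_RECVBUF_UNLOCK(so);
 *
 * (my_rx_upcall and my_arg are placeholder names).  Accept filters, as
 * seen in soisconnected() above, register their callbacks this way.
 *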
*/ void soupcall_set(struct socket *so, sb_which which, so_upcall_t func, void *arg) { struct sockbuf *sb; KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so)); switch (which) { case SO_RCV: sb = &so->so_rcv; break; case SO_SND: sb = &so->so_snd; break; } SOCK_BUF_LOCK_ASSERT(so, which); sb->sb_upcall = func; sb->sb_upcallarg = arg; sb->sb_flags |= SB_UPCALL; } void soupcall_clear(struct socket *so, sb_which which) { struct sockbuf *sb; KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so)); switch (which) { case SO_RCV: sb = &so->so_rcv; break; case SO_SND: sb = &so->so_snd; break; } SOCK_BUF_LOCK_ASSERT(so, which); KASSERT(sb->sb_upcall != NULL, ("%s: so %p no upcall to clear", __func__, so)); sb->sb_upcall = NULL; sb->sb_upcallarg = NULL; sb->sb_flags &= ~SB_UPCALL; } void solisten_upcall_set(struct socket *so, so_upcall_t func, void *arg) { SOLISTEN_LOCK_ASSERT(so); so->sol_upcall = func; so->sol_upcallarg = arg; } static void so_rdknl_lock(void *arg) { struct socket *so = arg; retry: if (SOLISTENING(so)) { SOLISTEN_LOCK(so); } else { SOCK_RECVBUF_LOCK(so); if (__predict_false(SOLISTENING(so))) { SOCK_RECVBUF_UNLOCK(so); goto retry; } } } static void so_rdknl_unlock(void *arg) { struct socket *so = arg; if (SOLISTENING(so)) SOLISTEN_UNLOCK(so); else SOCK_RECVBUF_UNLOCK(so); } static void so_rdknl_assert_lock(void *arg, int what) { struct socket *so = arg; if (what == LA_LOCKED) { if (SOLISTENING(so)) SOLISTEN_LOCK_ASSERT(so); else SOCK_RECVBUF_LOCK_ASSERT(so); } else { if (SOLISTENING(so)) SOLISTEN_UNLOCK_ASSERT(so); else SOCK_RECVBUF_UNLOCK_ASSERT(so); } } static void so_wrknl_lock(void *arg) { struct socket *so = arg; retry: if (SOLISTENING(so)) { SOLISTEN_LOCK(so); } else { SOCK_SENDBUF_LOCK(so); if (__predict_false(SOLISTENING(so))) { SOCK_SENDBUF_UNLOCK(so); goto retry; } } } static void so_wrknl_unlock(void *arg) { struct socket *so = arg; if (SOLISTENING(so)) SOLISTEN_UNLOCK(so); else SOCK_SENDBUF_UNLOCK(so); } static void so_wrknl_assert_lock(void *arg, int what) { struct socket *so = arg; if (what == LA_LOCKED) { if (SOLISTENING(so)) SOLISTEN_LOCK_ASSERT(so); else SOCK_SENDBUF_LOCK_ASSERT(so); } else { if (SOLISTENING(so)) SOLISTEN_UNLOCK_ASSERT(so); else SOCK_SENDBUF_UNLOCK_ASSERT(so); } } /* * Create an external-format (``xsocket'') structure using the information in * the kernel-format socket structure pointed to by so. This is done to * reduce the spew of irrelevant information over this interface, to isolate * user code from changes in the kernel structure, and potentially to provide * information-hiding if we decide that some of this information should be * hidden from users. */ void sotoxsocket(struct socket *so, struct xsocket *xso) { bzero(xso, sizeof(*xso)); xso->xso_len = sizeof *xso; xso->xso_so = (uintptr_t)so; xso->so_type = so->so_type; xso->so_options = so->so_options; xso->so_linger = so->so_linger; xso->so_state = so->so_state; xso->so_pcb = (uintptr_t)so->so_pcb; xso->xso_protocol = so->so_proto->pr_protocol; xso->xso_family = so->so_proto->pr_domain->dom_family; xso->so_timeo = so->so_timeo; xso->so_error = so->so_error; xso->so_uid = so->so_cred->cr_uid; xso->so_pgid = so->so_sigio ? 
so->so_sigio->sio_pgid : 0; if (SOLISTENING(so)) { xso->so_qlen = so->sol_qlen; xso->so_incqlen = so->sol_incqlen; xso->so_qlimit = so->sol_qlimit; xso->so_oobmark = 0; } else { xso->so_state |= so->so_qstate; xso->so_qlen = xso->so_incqlen = xso->so_qlimit = 0; xso->so_oobmark = so->so_oobmark; sbtoxsockbuf(&so->so_snd, &xso->so_snd); sbtoxsockbuf(&so->so_rcv, &xso->so_rcv); } } struct sockbuf * so_sockbuf_rcv(struct socket *so) { return (&so->so_rcv); } struct sockbuf * so_sockbuf_snd(struct socket *so) { return (&so->so_snd); } int so_state_get(const struct socket *so) { return (so->so_state); } void so_state_set(struct socket *so, int val) { so->so_state = val; } int so_options_get(const struct socket *so) { return (so->so_options); } void so_options_set(struct socket *so, int val) { so->so_options = val; } int so_error_get(const struct socket *so) { return (so->so_error); } void so_error_set(struct socket *so, int val) { so->so_error = val; } int so_linger_get(const struct socket *so) { return (so->so_linger); } void so_linger_set(struct socket *so, int val) { KASSERT(val >= 0 && val <= USHRT_MAX && val <= (INT_MAX / hz), ("%s: val %d out of range", __func__, val)); so->so_linger = val; } struct protosw * so_protosw_get(const struct socket *so) { return (so->so_proto); } void so_protosw_set(struct socket *so, struct protosw *val) { so->so_proto = val; } void so_sorwakeup(struct socket *so) { sorwakeup(so); } void so_sowwakeup(struct socket *so) { sowwakeup(so); } void so_sorwakeup_locked(struct socket *so) { sorwakeup_locked(so); } void so_sowwakeup_locked(struct socket *so) { sowwakeup_locked(so); } void so_lock(struct socket *so) { SOCK_LOCK(so); } void so_unlock(struct socket *so) { SOCK_UNLOCK(so); } diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c index 0361144f2763..e46fdef84fc9 100644 --- a/sys/kern/uipc_syscalls.c +++ b/sys/kern/uipc_syscalls.c @@ -1,1586 +1,1552 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_capsicum.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef COMPAT_43 #include #endif #include #include #include #ifdef KTRACE #include #endif #ifdef COMPAT_FREEBSD32 #include #endif #include #include #include static int sendit(struct thread *td, int s, struct msghdr *mp, int flags); static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp); static int accept1(struct thread *td, int s, struct sockaddr *uname, socklen_t *anamelen, int flags); static int sockargs(struct mbuf **, char *, socklen_t, int); /* * Convert a user file descriptor to a kernel file entry and check if required * capability rights are present. * If required copy of current set of capability rights is returned. * A reference on the file entry is held upon returning. */ int getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp, struct filecaps *havecapsp) { struct file *fp; int error; error = fget_cap(td, fd, rightsp, &fp, havecapsp); if (__predict_false(error != 0)) return (error); if (__predict_false(fp->f_type != DTYPE_SOCKET)) { fdrop(fp, td); if (havecapsp != NULL) filecaps_free(havecapsp); return (ENOTSOCK); } *fpp = fp; return (0); } int getsock(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp) { struct file *fp; int error; error = fget_unlocked(td, fd, rightsp, &fp); if (__predict_false(error != 0)) return (error); if (__predict_false(fp->f_type != DTYPE_SOCKET)) { fdrop(fp, td); return (ENOTSOCK); } *fpp = fp; return (0); } /* * System call interface to the socket abstraction. */ #if defined(COMPAT_43) #define COMPAT_OLDSOCK #endif int sys_socket(struct thread *td, struct socket_args *uap) { return (kern_socket(td, uap->domain, uap->type, uap->protocol)); } int kern_socket(struct thread *td, int domain, int type, int protocol) { struct socket *so; struct file *fp; int fd, error, oflag, fflag; AUDIT_ARG_SOCKET(domain, type, protocol); oflag = 0; fflag = 0; if ((type & SOCK_CLOEXEC) != 0) { type &= ~SOCK_CLOEXEC; oflag |= O_CLOEXEC; } if ((type & SOCK_NONBLOCK) != 0) { type &= ~SOCK_NONBLOCK; fflag |= FNONBLOCK; } #ifdef MAC error = mac_socket_check_create(td->td_ucred, domain, type, protocol); if (error != 0) return (error); #endif error = falloc(td, &fp, &fd, oflag); if (error != 0) return (error); /* An extra reference on `fp' has been held for us by falloc(). 
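 *
 * (For context, from userland this path is reached via, e.g.,
 *
 *	int s = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
 *
 * the SOCK_* type flags were stripped off above and are applied to the
 * new file and descriptor below.)
 *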
*/ error = socreate(domain, &so, type, protocol, td->td_ucred, td); if (error != 0) { fdclose(td, fp, fd); } else { finit(fp, FREAD | FWRITE | fflag, DTYPE_SOCKET, so, &socketops); if ((fflag & FNONBLOCK) != 0) (void) fo_ioctl(fp, FIONBIO, &fflag, td->td_ucred, td); td->td_retval[0] = fd; } fdrop(fp, td); return (error); } int sys_bind(struct thread *td, struct bind_args *uap) { struct sockaddr *sa; int error; error = getsockaddr(&sa, uap->name, uap->namelen); if (error == 0) { error = kern_bindat(td, AT_FDCWD, uap->s, sa); free(sa, M_SONAME); } return (error); } int kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa) { struct socket *so; struct file *fp; int error; #ifdef CAPABILITY_MODE if (IN_CAPABILITY_MODE(td) && (dirfd == AT_FDCWD)) return (ECAPMODE); #endif AUDIT_ARG_FD(fd); AUDIT_ARG_SOCKADDR(td, dirfd, sa); error = getsock(td, fd, &cap_bind_rights, &fp); if (error != 0) return (error); so = fp->f_data; #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT)) ktrsockaddr(sa); #endif #ifdef MAC error = mac_socket_check_bind(td->td_ucred, so, sa); if (error == 0) { #endif if (dirfd == AT_FDCWD) error = sobind(so, sa, td); else error = sobindat(dirfd, so, sa, td); #ifdef MAC } #endif fdrop(fp, td); return (error); } int sys_bindat(struct thread *td, struct bindat_args *uap) { struct sockaddr *sa; int error; error = getsockaddr(&sa, uap->name, uap->namelen); if (error == 0) { error = kern_bindat(td, uap->fd, uap->s, sa); free(sa, M_SONAME); } return (error); } int sys_listen(struct thread *td, struct listen_args *uap) { return (kern_listen(td, uap->s, uap->backlog)); } int kern_listen(struct thread *td, int s, int backlog) { struct socket *so; struct file *fp; int error; AUDIT_ARG_FD(s); error = getsock(td, s, &cap_listen_rights, &fp); if (error == 0) { so = fp->f_data; #ifdef MAC error = mac_socket_check_listen(td->td_ucred, so); if (error == 0) #endif error = solisten(so, backlog, td); fdrop(fp, td); } return (error); } /* * accept1() */ static int accept1(struct thread *td, int s, struct sockaddr *uname, socklen_t *anamelen, int flags) { struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; socklen_t addrlen; struct file *fp; int error; if (uname != NULL) { error = copyin(anamelen, &addrlen, sizeof(addrlen)); if (error != 0) return (error); } error = kern_accept4(td, s, (struct sockaddr *)&ss, flags, &fp); if (error != 0) return (error); #ifdef COMPAT_OLDSOCK if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && (flags & ACCEPT4_COMPAT) != 0) ((struct osockaddr *)&ss)->sa_family = ss.ss_family; #endif if (uname != NULL) { addrlen = min(ss.ss_len, addrlen); error = copyout(&ss, uname, addrlen); if (error == 0) { addrlen = ss.ss_len; error = copyout(&addrlen, anamelen, sizeof(addrlen)); } } if (error != 0) fdclose(td, fp, td->td_retval[0]); fdrop(fp, td); return (error); } int kern_accept(struct thread *td, int s, struct sockaddr *sa, struct file **fp) { return (kern_accept4(td, s, sa, ACCEPT4_INHERIT, fp)); } int kern_accept4(struct thread *td, int s, struct sockaddr *sa, int flags, struct file **fp) { struct file *headfp, *nfp = NULL; struct socket *head, *so; struct filecaps fcaps; u_int fflag; pid_t pgid; int error, fd, tmp; AUDIT_ARG_FD(s); error = getsock_cap(td, s, &cap_accept_rights, &headfp, &fcaps); if (error != 0) return (error); fflag = atomic_load_int(&headfp->f_flag); head = headfp->f_data; if (!SOLISTENING(head)) { error = EINVAL; goto done; } #ifdef MAC error = mac_socket_check_accept(td->td_ucred, head); if (error != 0) goto done; #endif error = falloc_caps(td, &nfp, &fd, 
(flags & SOCK_CLOEXEC) ? O_CLOEXEC : 0, &fcaps); if (error != 0) goto done; SOCK_LOCK(head); if (!SOLISTENING(head)) { SOCK_UNLOCK(head); error = EINVAL; goto noconnection; } error = solisten_dequeue(head, &so, flags); if (error != 0) goto noconnection; /* An extra reference on `nfp' has been held for us by falloc(). */ td->td_retval[0] = fd; /* Connection has been removed from the listen queue. */ KNOTE_UNLOCKED(&head->so_rdsel.si_note, 0); if (flags & ACCEPT4_INHERIT) { pgid = fgetown(&head->so_sigio); if (pgid != 0) fsetown(pgid, &so->so_sigio); } else { fflag &= ~(FNONBLOCK | FASYNC); if (flags & SOCK_NONBLOCK) fflag |= FNONBLOCK; } finit(nfp, fflag, DTYPE_SOCKET, so, &socketops); /* Sync socket nonblocking/async state with file flags */ tmp = fflag & FNONBLOCK; (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td); tmp = fflag & FASYNC; (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td); if ((error = soaccept(so, sa)) == 0) { AUDIT_ARG_SOCKADDR(td, AT_FDCWD, sa); #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT)) ktrsockaddr(sa); #endif } noconnection: /* * close the new descriptor, assuming someone hasn't ripped it * out from under us. */ if (error != 0) fdclose(td, nfp, fd); /* * Release explicitly held references before returning. We return * a reference on nfp to the caller on success if they request it. */ done: if (nfp == NULL) filecaps_free(&fcaps); if (fp != NULL) { if (error == 0) { *fp = nfp; nfp = NULL; } else *fp = NULL; } if (nfp != NULL) fdrop(nfp, td); fdrop(headfp, td); return (error); } int sys_accept(struct thread *td, struct accept_args *uap) { return (accept1(td, uap->s, uap->name, uap->anamelen, ACCEPT4_INHERIT)); } int sys_accept4(struct thread *td, struct accept4_args *uap) { if (uap->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return (EINVAL); return (accept1(td, uap->s, uap->name, uap->anamelen, uap->flags)); } #ifdef COMPAT_OLDSOCK int oaccept(struct thread *td, struct oaccept_args *uap) { return (accept1(td, uap->s, uap->name, uap->anamelen, ACCEPT4_INHERIT | ACCEPT4_COMPAT)); } #endif /* COMPAT_OLDSOCK */ int sys_connect(struct thread *td, struct connect_args *uap) { struct sockaddr *sa; int error; error = getsockaddr(&sa, uap->name, uap->namelen); if (error == 0) { error = kern_connectat(td, AT_FDCWD, uap->s, sa); free(sa, M_SONAME); } return (error); } int kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa) { struct socket *so; struct file *fp; int error; #ifdef CAPABILITY_MODE if (IN_CAPABILITY_MODE(td) && (dirfd == AT_FDCWD)) return (ECAPMODE); #endif AUDIT_ARG_FD(fd); AUDIT_ARG_SOCKADDR(td, dirfd, sa); error = getsock(td, fd, &cap_connect_rights, &fp); if (error != 0) return (error); so = fp->f_data; if (so->so_state & SS_ISCONNECTING) { error = EALREADY; goto done1; } #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT)) ktrsockaddr(sa); #endif #ifdef MAC error = mac_socket_check_connect(td->td_ucred, so, sa); if (error != 0) goto bad; #endif error = soconnectat(dirfd, so, sa, td); if (error != 0) goto bad; if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) { error = EINPROGRESS; goto done1; } SOCK_LOCK(so); while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { error = msleep(&so->so_timeo, &so->so_lock, PSOCK | PCATCH, "connec", 0); if (error != 0) break; } if (error == 0) { error = so->so_error; so->so_error = 0; } SOCK_UNLOCK(so); bad: if (error == ERESTART) error = EINTR; done1: fdrop(fp, td); return (error); } int sys_connectat(struct thread *td, struct connectat_args *uap) { struct sockaddr *sa; int error; error = 
getsockaddr(&sa, uap->name, uap->namelen); if (error == 0) { error = kern_connectat(td, uap->fd, uap->s, sa); free(sa, M_SONAME); } return (error); } int kern_socketpair(struct thread *td, int domain, int type, int protocol, int *rsv) { struct file *fp1, *fp2; struct socket *so1, *so2; int fd, error, oflag, fflag; AUDIT_ARG_SOCKET(domain, type, protocol); oflag = 0; fflag = 0; if ((type & SOCK_CLOEXEC) != 0) { type &= ~SOCK_CLOEXEC; oflag |= O_CLOEXEC; } if ((type & SOCK_NONBLOCK) != 0) { type &= ~SOCK_NONBLOCK; fflag |= FNONBLOCK; } #ifdef MAC /* We might want to have a separate check for socket pairs. */ error = mac_socket_check_create(td->td_ucred, domain, type, protocol); if (error != 0) return (error); #endif error = socreate(domain, &so1, type, protocol, td->td_ucred, td); if (error != 0) return (error); error = socreate(domain, &so2, type, protocol, td->td_ucred, td); if (error != 0) goto free1; /* On success extra reference to `fp1' and 'fp2' is set by falloc. */ error = falloc(td, &fp1, &fd, oflag); if (error != 0) goto free2; rsv[0] = fd; fp1->f_data = so1; /* so1 already has ref count */ error = falloc(td, &fp2, &fd, oflag); if (error != 0) goto free3; fp2->f_data = so2; /* so2 already has ref count */ rsv[1] = fd; error = soconnect2(so1, so2); if (error != 0) goto free4; if (type == SOCK_DGRAM) { /* * Datagram socket connection is asymmetric. */ error = soconnect2(so2, so1); if (error != 0) goto free4; } else if (so1->so_proto->pr_flags & PR_CONNREQUIRED) { struct unpcb *unp, *unp2; unp = sotounpcb(so1); unp2 = sotounpcb(so2); /* * No need to lock the unps, because the sockets are brand-new. * No other threads can be using them yet */ unp_copy_peercred(td, unp, unp2, unp); } finit(fp1, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp1->f_data, &socketops); finit(fp2, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp2->f_data, &socketops); if ((fflag & FNONBLOCK) != 0) { (void) fo_ioctl(fp1, FIONBIO, &fflag, td->td_ucred, td); (void) fo_ioctl(fp2, FIONBIO, &fflag, td->td_ucred, td); } fdrop(fp1, td); fdrop(fp2, td); return (0); free4: fdclose(td, fp2, rsv[1]); fdrop(fp2, td); free3: fdclose(td, fp1, rsv[0]); fdrop(fp1, td); free2: if (so2 != NULL) (void)soclose(so2); free1: if (so1 != NULL) (void)soclose(so1); return (error); } int sys_socketpair(struct thread *td, struct socketpair_args *uap) { int error, sv[2]; error = kern_socketpair(td, uap->domain, uap->type, uap->protocol, sv); if (error != 0) return (error); error = copyout(sv, uap->rsv, 2 * sizeof(int)); if (error != 0) { (void)kern_close(td, sv[0]); (void)kern_close(td, sv[1]); } return (error); } static int sendit(struct thread *td, int s, struct msghdr *mp, int flags) { struct mbuf *control; struct sockaddr *to; int error; #ifdef CAPABILITY_MODE if (IN_CAPABILITY_MODE(td) && (mp->msg_name != NULL)) return (ECAPMODE); #endif if (mp->msg_name != NULL) { error = getsockaddr(&to, mp->msg_name, mp->msg_namelen); if (error != 0) { to = NULL; goto bad; } mp->msg_name = to; } else { to = NULL; } if (mp->msg_control) { if (mp->msg_controllen < sizeof(struct cmsghdr) #ifdef COMPAT_OLDSOCK && (mp->msg_flags != MSG_COMPAT || !SV_PROC_FLAG(td->td_proc, SV_AOUT)) #endif ) { error = EINVAL; goto bad; } error = sockargs(&control, mp->msg_control, mp->msg_controllen, MT_CONTROL); if (error != 0) goto bad; #ifdef COMPAT_OLDSOCK if (mp->msg_flags == MSG_COMPAT && SV_PROC_FLAG(td->td_proc, SV_AOUT)) { struct cmsghdr *cm; M_PREPEND(control, sizeof(*cm), M_WAITOK); cm = mtod(control, struct cmsghdr *); cm->cmsg_len = control->m_len; cm->cmsg_level = 
SOL_SOCKET; cm->cmsg_type = SCM_RIGHTS; } #endif } else { control = NULL; } error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE); bad: free(to, M_SONAME); return (error); } int kern_sendit(struct thread *td, int s, struct msghdr *mp, int flags, struct mbuf *control, enum uio_seg segflg) { struct file *fp; struct uio auio; struct iovec *iov; struct socket *so; cap_rights_t *rights; #ifdef KTRACE struct uio *ktruio = NULL; #endif ssize_t len; int i, error; AUDIT_ARG_FD(s); rights = &cap_send_rights; if (mp->msg_name != NULL) { AUDIT_ARG_SOCKADDR(td, AT_FDCWD, mp->msg_name); rights = &cap_send_connect_rights; } error = getsock(td, s, rights, &fp); if (error != 0) { m_freem(control); return (error); } so = (struct socket *)fp->f_data; #ifdef KTRACE if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT)) ktrsockaddr(mp->msg_name); #endif #ifdef MAC if (mp->msg_name != NULL) { error = mac_socket_check_connect(td->td_ucred, so, mp->msg_name); if (error != 0) { m_freem(control); goto bad; } } error = mac_socket_check_send(td->td_ucred, so); if (error != 0) { m_freem(control); goto bad; } #endif auio.uio_iov = mp->msg_iov; auio.uio_iovcnt = mp->msg_iovlen; auio.uio_segflg = segflg; auio.uio_rw = UIO_WRITE; auio.uio_td = td; auio.uio_offset = 0; /* XXX */ auio.uio_resid = 0; iov = mp->msg_iov; for (i = 0; i < mp->msg_iovlen; i++, iov++) { if ((auio.uio_resid += iov->iov_len) < 0) { error = EINVAL; m_freem(control); goto bad; } } #ifdef KTRACE if (KTRPOINT(td, KTR_GENIO)) ktruio = cloneuio(&auio); #endif len = auio.uio_resid; error = sousrsend(so, mp->msg_name, &auio, control, flags, NULL); if (error == 0) td->td_retval[0] = len - auio.uio_resid; #ifdef KTRACE if (ktruio != NULL) { ktruio->uio_resid = td->td_retval[0]; ktrgenio(s, UIO_WRITE, ktruio, error); } #endif bad: fdrop(fp, td); return (error); } int sys_sendto(struct thread *td, struct sendto_args *uap) { struct msghdr msg; struct iovec aiov; msg.msg_name = __DECONST(void *, uap->to); msg.msg_namelen = uap->tolen; msg.msg_iov = &aiov; msg.msg_iovlen = 1; msg.msg_control = 0; #ifdef COMPAT_OLDSOCK if (SV_PROC_FLAG(td->td_proc, SV_AOUT)) msg.msg_flags = 0; #endif aiov.iov_base = __DECONST(void *, uap->buf); aiov.iov_len = uap->len; return (sendit(td, uap->s, &msg, uap->flags)); } #ifdef COMPAT_OLDSOCK int osend(struct thread *td, struct osend_args *uap) { struct msghdr msg; struct iovec aiov; msg.msg_name = 0; msg.msg_namelen = 0; msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = __DECONST(void *, uap->buf); aiov.iov_len = uap->len; msg.msg_control = 0; msg.msg_flags = 0; return (sendit(td, uap->s, &msg, uap->flags)); } int osendmsg(struct thread *td, struct osendmsg_args *uap) { struct msghdr msg; struct iovec *iov; int error; error = copyin(uap->msg, &msg, sizeof (struct omsghdr)); if (error != 0) return (error); error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); if (error != 0) return (error); msg.msg_iov = iov; msg.msg_flags = MSG_COMPAT; error = sendit(td, uap->s, &msg, uap->flags); free(iov, M_IOV); return (error); } #endif int sys_sendmsg(struct thread *td, struct sendmsg_args *uap) { struct msghdr msg; struct iovec *iov; int error; error = copyin(uap->msg, &msg, sizeof (msg)); if (error != 0) return (error); error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); if (error != 0) return (error); msg.msg_iov = iov; #ifdef COMPAT_OLDSOCK if (SV_PROC_FLAG(td->td_proc, SV_AOUT)) msg.msg_flags = 0; #endif error = sendit(td, uap->s, &msg, uap->flags); free(iov, M_IOV); return (error); } int kern_recvit(struct 
thread *td, int s, struct msghdr *mp, enum uio_seg fromseg, struct mbuf **controlp) { struct uio auio; struct iovec *iov; struct mbuf *control, *m; caddr_t ctlbuf; struct file *fp; struct socket *so; struct sockaddr *fromsa = NULL; #ifdef KTRACE struct uio *ktruio = NULL; #endif ssize_t len; int error, i; if (controlp != NULL) *controlp = NULL; AUDIT_ARG_FD(s); error = getsock(td, s, &cap_recv_rights, &fp); if (error != 0) return (error); so = fp->f_data; #ifdef MAC error = mac_socket_check_receive(td->td_ucred, so); if (error != 0) { fdrop(fp, td); return (error); } #endif auio.uio_iov = mp->msg_iov; auio.uio_iovcnt = mp->msg_iovlen; auio.uio_segflg = UIO_USERSPACE; auio.uio_rw = UIO_READ; auio.uio_td = td; auio.uio_offset = 0; /* XXX */ auio.uio_resid = 0; iov = mp->msg_iov; for (i = 0; i < mp->msg_iovlen; i++, iov++) { if ((auio.uio_resid += iov->iov_len) < 0) { fdrop(fp, td); return (EINVAL); } } #ifdef KTRACE if (KTRPOINT(td, KTR_GENIO)) ktruio = cloneuio(&auio); #endif control = NULL; len = auio.uio_resid; error = soreceive(so, &fromsa, &auio, NULL, (mp->msg_control || controlp) ? &control : NULL, &mp->msg_flags); if (error != 0) { if (auio.uio_resid != len && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) error = 0; } if (fromsa != NULL) AUDIT_ARG_SOCKADDR(td, AT_FDCWD, fromsa); #ifdef KTRACE if (ktruio != NULL) { /* MSG_TRUNC can trigger underflow of uio_resid. */ ktruio->uio_resid = MIN(len - auio.uio_resid, len); ktrgenio(s, UIO_READ, ktruio, error); } #endif if (error != 0) goto out; td->td_retval[0] = len - auio.uio_resid; if (mp->msg_name) { len = mp->msg_namelen; if (len <= 0 || fromsa == NULL) len = 0; else { /* save sa_len before it is destroyed by MSG_COMPAT */ len = MIN(len, fromsa->sa_len); #ifdef COMPAT_OLDSOCK if ((mp->msg_flags & MSG_COMPAT) != 0 && SV_PROC_FLAG(td->td_proc, SV_AOUT)) ((struct osockaddr *)fromsa)->sa_family = fromsa->sa_family; #endif if (fromseg == UIO_USERSPACE) { error = copyout(fromsa, mp->msg_name, (unsigned)len); if (error != 0) goto out; } else bcopy(fromsa, mp->msg_name, len); } mp->msg_namelen = len; } if (mp->msg_control && controlp == NULL) { #ifdef COMPAT_OLDSOCK /* * We assume that old recvmsg calls won't receive access * rights and other control info, esp. as control info * is always optional and those options didn't exist in 4.3. * If we receive rights, trim the cmsghdr; anything else * is tossed. 
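 *
 * Descriptive note: after the cmsghdr is trimmed below, only the raw
 * descriptor array remains to be copied out, which is what the old
 * interface expects in its msg_accrights buffer (see orecvmsg()).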
*/ if (control && (mp->msg_flags & MSG_COMPAT) != 0 && SV_PROC_FLAG(td->td_proc, SV_AOUT)) { if (mtod(control, struct cmsghdr *)->cmsg_level != SOL_SOCKET || mtod(control, struct cmsghdr *)->cmsg_type != SCM_RIGHTS) { mp->msg_controllen = 0; goto out; } control->m_len -= sizeof (struct cmsghdr); control->m_data += sizeof (struct cmsghdr); } #endif ctlbuf = mp->msg_control; len = mp->msg_controllen; mp->msg_controllen = 0; for (m = control; m != NULL && len >= m->m_len; m = m->m_next) { if ((error = copyout(mtod(m, caddr_t), ctlbuf, m->m_len)) != 0) goto out; ctlbuf += m->m_len; len -= m->m_len; mp->msg_controllen += m->m_len; } if (m != NULL) { mp->msg_flags |= MSG_CTRUNC; m_dispose_extcontrolm(m); } } out: fdrop(fp, td); #ifdef KTRACE if (fromsa && KTRPOINT(td, KTR_STRUCT)) ktrsockaddr(fromsa); #endif free(fromsa, M_SONAME); if (error == 0 && controlp != NULL) *controlp = control; else if (control != NULL) { if (error != 0) m_dispose_extcontrolm(control); m_freem(control); } return (error); } static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp) { int error; error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL); if (error != 0) return (error); if (namelenp != NULL) { error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t)); #ifdef COMPAT_OLDSOCK if ((mp->msg_flags & MSG_COMPAT) != 0 && SV_PROC_FLAG(td->td_proc, SV_AOUT)) error = 0; /* old recvfrom didn't check */ #endif } return (error); } static int kern_recvfrom(struct thread *td, int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr) { struct msghdr msg; struct iovec aiov; int error; if (fromlenaddr != NULL) { error = copyin(fromlenaddr, &msg.msg_namelen, sizeof (msg.msg_namelen)); if (error != 0) goto done2; } else { msg.msg_namelen = 0; } msg.msg_name = from; msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = buf; aiov.iov_len = len; msg.msg_control = 0; msg.msg_flags = flags; error = recvit(td, s, &msg, fromlenaddr); done2: return (error); } int sys_recvfrom(struct thread *td, struct recvfrom_args *uap) { return (kern_recvfrom(td, uap->s, uap->buf, uap->len, uap->flags, uap->from, uap->fromlenaddr)); } #ifdef COMPAT_OLDSOCK int orecvfrom(struct thread *td, struct orecvfrom_args *uap) { return (kern_recvfrom(td, uap->s, uap->buf, uap->len, uap->flags | MSG_COMPAT, uap->from, uap->fromlenaddr)); } #endif #ifdef COMPAT_OLDSOCK int orecv(struct thread *td, struct orecv_args *uap) { struct msghdr msg; struct iovec aiov; msg.msg_name = 0; msg.msg_namelen = 0; msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = uap->buf; aiov.iov_len = uap->len; msg.msg_control = 0; msg.msg_flags = uap->flags; return (recvit(td, uap->s, &msg, NULL)); } /* * Old recvmsg. This code takes advantage of the fact that the old msghdr * overlays the new one, missing only the flags, and with the (old) access * rights where the control fields are now. 
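 *
 * Concretely (a descriptive note, not new code): struct omsghdr ends in
 * msg_accrights/msg_accrightslen where struct msghdr has
 * msg_control/msg_controllen, and it has no msg_flags member, so the
 * copyin() of a struct omsghdr below can be stored into a struct msghdr
 * with the flags filled in by hand afterwards.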
*/ int orecvmsg(struct thread *td, struct orecvmsg_args *uap) { struct msghdr msg; struct iovec *iov; int error; error = copyin(uap->msg, &msg, sizeof (struct omsghdr)); if (error != 0) return (error); error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); if (error != 0) return (error); msg.msg_flags = uap->flags | MSG_COMPAT; msg.msg_iov = iov; error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen); if (msg.msg_controllen && error == 0) error = copyout(&msg.msg_controllen, &uap->msg->msg_accrightslen, sizeof (int)); free(iov, M_IOV); return (error); } #endif int sys_recvmsg(struct thread *td, struct recvmsg_args *uap) { struct msghdr msg; struct iovec *uiov, *iov; int error; error = copyin(uap->msg, &msg, sizeof (msg)); if (error != 0) return (error); error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); if (error != 0) return (error); msg.msg_flags = uap->flags; #ifdef COMPAT_OLDSOCK if (SV_PROC_FLAG(td->td_proc, SV_AOUT)) msg.msg_flags &= ~MSG_COMPAT; #endif uiov = msg.msg_iov; msg.msg_iov = iov; error = recvit(td, uap->s, &msg, NULL); if (error == 0) { msg.msg_iov = uiov; error = copyout(&msg, uap->msg, sizeof(msg)); } free(iov, M_IOV); return (error); } int sys_shutdown(struct thread *td, struct shutdown_args *uap) { return (kern_shutdown(td, uap->s, uap->how)); } int kern_shutdown(struct thread *td, int s, int how) { struct socket *so; struct file *fp; int error; AUDIT_ARG_FD(s); error = getsock(td, s, &cap_shutdown_rights, &fp); if (error == 0) { so = fp->f_data; error = soshutdown(so, how); /* * Previous versions did not return ENOTCONN, but 0 in * case the socket was not connected. Some important * programs like syslogd up to r279016, 2015-02-19, * still depend on this behavior. */ if (error == ENOTCONN && td->td_proc->p_osrel < P_OSREL_SHUTDOWN_ENOTCONN) error = 0; fdrop(fp, td); } return (error); } int sys_setsockopt(struct thread *td, struct setsockopt_args *uap) { return (kern_setsockopt(td, uap->s, uap->level, uap->name, uap->val, UIO_USERSPACE, uap->valsize)); } int kern_setsockopt(struct thread *td, int s, int level, int name, const void *val, enum uio_seg valseg, socklen_t valsize) { struct socket *so; struct file *fp; struct sockopt sopt; int error; if (val == NULL && valsize != 0) return (EFAULT); if ((int)valsize < 0) return (EINVAL); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = level; sopt.sopt_name = name; sopt.sopt_val = __DECONST(void *, val); sopt.sopt_valsize = valsize; switch (valseg) { case UIO_USERSPACE: sopt.sopt_td = td; break; case UIO_SYSSPACE: sopt.sopt_td = NULL; break; default: panic("kern_setsockopt called with bad valseg"); } AUDIT_ARG_FD(s); error = getsock(td, s, &cap_setsockopt_rights, &fp); if (error == 0) { so = fp->f_data; error = sosetopt(so, &sopt); fdrop(fp, td); } return(error); } int sys_getsockopt(struct thread *td, struct getsockopt_args *uap) { socklen_t valsize; int error; if (uap->val) { error = copyin(uap->avalsize, &valsize, sizeof (valsize)); if (error != 0) return (error); } error = kern_getsockopt(td, uap->s, uap->level, uap->name, uap->val, UIO_USERSPACE, &valsize); if (error == 0) error = copyout(&valsize, uap->avalsize, sizeof (valsize)); return (error); } /* * Kernel version of getsockopt. * optval can be a userland or a kernel address, depending on valseg. optlen is always a kernel pointer.
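 *
 * A minimal illustrative sketch (hypothetical caller, not taken from this
 * file): an in-kernel consumer passes UIO_SYSSPACE so that no copyout is
 * performed, e.g.
 *
 *	int type;
 *	socklen_t len = sizeof(type);
 *
 *	error = kern_getsockopt(td, fd, SOL_SOCKET, SO_TYPE, &type,
 *	    UIO_SYSSPACE, &len);
 *
 * while sys_getsockopt() above passes UIO_USERSPACE and copies the returned
 * length back out to the user's *avalsize.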
*/ int kern_getsockopt(struct thread *td, int s, int level, int name, void *val, enum uio_seg valseg, socklen_t *valsize) { struct socket *so; struct file *fp; struct sockopt sopt; int error; if (val == NULL) *valsize = 0; if ((int)*valsize < 0) return (EINVAL); sopt.sopt_dir = SOPT_GET; sopt.sopt_level = level; sopt.sopt_name = name; sopt.sopt_val = val; sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */ switch (valseg) { case UIO_USERSPACE: sopt.sopt_td = td; break; case UIO_SYSSPACE: sopt.sopt_td = NULL; break; default: panic("kern_getsockopt called with bad valseg"); } AUDIT_ARG_FD(s); error = getsock(td, s, &cap_getsockopt_rights, &fp); if (error == 0) { so = fp->f_data; error = sogetopt(so, &sopt); *valsize = sopt.sopt_valsize; fdrop(fp, td); } return (error); } static int user_getsockname(struct thread *td, int fdes, struct sockaddr *asa, socklen_t *alen, bool compat) { - struct sockaddr *sa; + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; socklen_t len; int error; error = copyin(alen, &len, sizeof(len)); if (error != 0) return (error); - error = kern_getsockname(td, fdes, &sa, &len); + error = kern_getsockname(td, fdes, (struct sockaddr *)&ss); if (error != 0) return (error); - if (len != 0) { #ifdef COMPAT_OLDSOCK - if (compat && SV_PROC_FLAG(td->td_proc, SV_AOUT)) - ((struct osockaddr *)sa)->sa_family = sa->sa_family; + if (compat && SV_PROC_FLAG(td->td_proc, SV_AOUT)) + ((struct osockaddr *)&ss)->sa_family = ss.ss_family; #endif - error = copyout(sa, asa, len); - } - free(sa, M_SONAME); - if (error == 0) + len = min(ss.ss_len, len); + error = copyout(&ss, asa, len); + if (error == 0) { + len = ss.ss_len; error = copyout(&len, alen, sizeof(len)); + } return (error); } int -kern_getsockname(struct thread *td, int fd, struct sockaddr **sa, - socklen_t *alen) +kern_getsockname(struct thread *td, int fd, struct sockaddr *sa) { struct socket *so; struct file *fp; - socklen_t len; int error; AUDIT_ARG_FD(fd); error = getsock(td, fd, &cap_getsockname_rights, &fp); if (error != 0) return (error); so = fp->f_data; - *sa = NULL; - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_sockaddr(so, sa); - CURVNET_RESTORE(); - if (error != 0) - goto bad; - if (*sa == NULL) - len = 0; - else - len = MIN(*alen, (*sa)->sa_len); - *alen = len; + error = sosockaddr(so, sa); #ifdef KTRACE - if (KTRPOINT(td, KTR_STRUCT)) - ktrsockaddr(*sa); + if (error == 0 && KTRPOINT(td, KTR_STRUCT)) + ktrsockaddr(sa); #endif -bad: fdrop(fp, td); - if (error != 0 && *sa != NULL) { - free(*sa, M_SONAME); - *sa = NULL; - } return (error); } int sys_getsockname(struct thread *td, struct getsockname_args *uap) { return (user_getsockname(td, uap->fdes, uap->asa, uap->alen, false)); } #ifdef COMPAT_OLDSOCK int ogetsockname(struct thread *td, struct ogetsockname_args *uap) { return (user_getsockname(td, uap->fdes, uap->asa, uap->alen, true)); } #endif /* COMPAT_OLDSOCK */ static int user_getpeername(struct thread *td, int fdes, struct sockaddr *asa, socklen_t *alen, bool compat) { - struct sockaddr *sa; + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; socklen_t len; int error; error = copyin(alen, &len, sizeof (len)); if (error != 0) return (error); - error = kern_getpeername(td, fdes, &sa, &len); + error = kern_getpeername(td, fdes, (struct sockaddr *)&ss); if (error != 0) return (error); - if (len != 0) { #ifdef COMPAT_OLDSOCK - if (compat && SV_PROC_FLAG(td->td_proc, SV_AOUT)) - ((struct osockaddr *)sa)->sa_family = sa->sa_family; + if (compat && SV_PROC_FLAG(td->td_proc, SV_AOUT)) 
+ ((struct osockaddr *)&ss)->sa_family = ss.ss_family; #endif - error = copyout(sa, asa, len); - } - free(sa, M_SONAME); - if (error == 0) + len = min(ss.ss_len, len); + error = copyout(&ss, asa, len); + if (error == 0) { + len = ss.ss_len; error = copyout(&len, alen, sizeof(len)); + } return (error); } int -kern_getpeername(struct thread *td, int fd, struct sockaddr **sa, - socklen_t *alen) +kern_getpeername(struct thread *td, int fd, struct sockaddr *sa) { struct socket *so; struct file *fp; - socklen_t len; int error; AUDIT_ARG_FD(fd); error = getsock(td, fd, &cap_getpeername_rights, &fp); if (error != 0) return (error); so = fp->f_data; if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) { error = ENOTCONN; goto done; } - *sa = NULL; - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_peeraddr(so, sa); - CURVNET_RESTORE(); - if (error != 0) - goto bad; - if (*sa == NULL) - len = 0; - else - len = MIN(*alen, (*sa)->sa_len); - *alen = len; + error = sopeeraddr(so, sa); #ifdef KTRACE - if (KTRPOINT(td, KTR_STRUCT)) - ktrsockaddr(*sa); + if (error == 0 && KTRPOINT(td, KTR_STRUCT)) + ktrsockaddr(sa); #endif -bad: - if (error != 0 && *sa != NULL) { - free(*sa, M_SONAME); - *sa = NULL; - } done: fdrop(fp, td); return (error); } int sys_getpeername(struct thread *td, struct getpeername_args *uap) { return (user_getpeername(td, uap->fdes, uap->asa, uap->alen, false)); } #ifdef COMPAT_OLDSOCK int ogetpeername(struct thread *td, struct ogetpeername_args *uap) { return (user_getpeername(td, uap->fdes, uap->asa, uap->alen, true)); } #endif /* COMPAT_OLDSOCK */ static int sockargs(struct mbuf **mp, char *buf, socklen_t buflen, int type) { struct sockaddr *sa; struct mbuf *m; int error; if (buflen > MLEN) { #ifdef COMPAT_OLDSOCK if (type == MT_SONAME && buflen <= 112 && SV_CURPROC_FLAG(SV_AOUT)) buflen = MLEN; /* unix domain compat. hack */ else #endif if (buflen > MCLBYTES) return (EMSGSIZE); } m = m_get2(buflen, M_WAITOK, type, 0); m->m_len = buflen; error = copyin(buf, mtod(m, void *), buflen); if (error != 0) (void) m_free(m); else { *mp = m; if (type == MT_SONAME) { sa = mtod(m, struct sockaddr *); #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN if (sa->sa_family == 0 && sa->sa_len < AF_MAX && SV_CURPROC_FLAG(SV_AOUT)) sa->sa_family = sa->sa_len; #endif sa->sa_len = buflen; } } return (error); } int getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr, size_t len) { struct sockaddr *sa; int error; if (len > SOCK_MAXADDRLEN) return (ENAMETOOLONG); if (len < offsetof(struct sockaddr, sa_data[0])) return (EINVAL); sa = malloc(len, M_SONAME, M_WAITOK); error = copyin(uaddr, sa, len); if (error != 0) { free(sa, M_SONAME); } else { #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN if (sa->sa_family == 0 && sa->sa_len < AF_MAX && SV_CURPROC_FLAG(SV_AOUT)) sa->sa_family = sa->sa_len; #endif sa->sa_len = len; *namp = sa; } return (error); } /* * Dispose of externalized rights from an SCM_RIGHTS message. This function * should be used in error or truncation cases to avoid leaking file descriptors * into the recipient's (the current thread's) table. 
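 *
 * Descriptive note: each MT_EXTCONTROL mbuf walked below carries one or more
 * cmsghdr records; for SCM_RIGHTS the payload is an array of descriptor
 * numbers that have already been installed in the current thread's file
 * table, which is why each one is looked up with fget() and closed again
 * with fdclose().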
*/ void m_dispose_extcontrolm(struct mbuf *m) { struct cmsghdr *cm; struct file *fp; struct thread *td; socklen_t clen, datalen; int error, fd, *fds, nfd; td = curthread; for (; m != NULL; m = m->m_next) { if (m->m_type != MT_EXTCONTROL) continue; cm = mtod(m, struct cmsghdr *); clen = m->m_len; while (clen > 0) { if (clen < sizeof(*cm)) panic("%s: truncated mbuf %p", __func__, m); datalen = CMSG_SPACE(cm->cmsg_len - CMSG_SPACE(0)); if (clen < datalen) panic("%s: truncated mbuf %p", __func__, m); if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) { fds = (int *)CMSG_DATA(cm); nfd = (cm->cmsg_len - CMSG_SPACE(0)) / sizeof(int); while (nfd-- > 0) { fd = *fds++; error = fget(td, fd, &cap_no_rights, &fp); if (error == 0) { fdclose(td, fp, fd); fdrop(fp, td); } } } clen -= datalen; cm = (struct cmsghdr *)((uint8_t *)cm + datalen); } m_chtype(m, MT_CONTROL); } } diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c index 3071a5169b72..069294e1c963 100644 --- a/sys/kern/uipc_usrreq.c +++ b/sys/kern/uipc_usrreq.c @@ -1,3563 +1,3533 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All Rights Reserved. * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved. * Copyright (c) 2018 Matthew Macy * Copyright (c) 2022 Gleb Smirnoff * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * UNIX Domain (Local) Sockets * * This is an implementation of UNIX (local) domain sockets. Each socket has * an associated struct unpcb (UNIX protocol control block). Stream sockets * may be connected to 0 or 1 other socket. Datagram sockets may be * connected to 0, 1, or many other sockets. Sockets may be created and * connected in pairs (socketpair(2)), or bound/connected to using the file * system name space. For most purposes, only the receive socket buffer is * used, as sending on one socket delivers directly to the receive socket * buffer of a second socket. 
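 *
 * For illustration only (a minimal userland sketch, not part of this file),
 * the two ways of setting up local sockets mentioned above are:
 *
 *	int sv[2];
 *	socketpair(PF_LOCAL, SOCK_STREAM, 0, sv);
 *
 *	struct sockaddr_un sun = { .sun_family = AF_LOCAL };
 *	strlcpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path));
 *	bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun));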
* * The implementation is substantially complicated by the fact that * "ancillary data", such as file descriptors or credentials, may be passed * across UNIX domain sockets. The potential for passing UNIX domain sockets * over other UNIX domain sockets requires the implementation of a simple * garbage collector to find and tear down cycles of disconnected sockets. * * TODO: * RDM * rethink name space problems * need a proper out-of-band */ #include #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include #include MALLOC_DECLARE(M_FILECAPS); static struct domain localdomain; static uma_zone_t unp_zone; static unp_gen_t unp_gencnt; /* (l) */ static u_int unp_count; /* (l) Count of local sockets. */ static ino_t unp_ino; /* Prototype for fake inode numbers. */ static int unp_rights; /* (g) File descriptors in flight. */ static struct unp_head unp_shead; /* (l) List of stream sockets. */ static struct unp_head unp_dhead; /* (l) List of datagram sockets. */ static struct unp_head unp_sphead; /* (l) List of seqpacket sockets. */ struct unp_defer { SLIST_ENTRY(unp_defer) ud_link; struct file *ud_fp; }; static SLIST_HEAD(, unp_defer) unp_defers; static int unp_defers_count; static const struct sockaddr sun_noname = { .sa_len = sizeof(sun_noname), .sa_family = AF_LOCAL, }; /* * Garbage collection of cyclic file descriptor/socket references occurs * asynchronously in a taskqueue context in order to avoid recursion and * reentrance in the UNIX domain socket, file descriptor, and socket layer * code. See unp_gc() for a full description. */ static struct timeout_task unp_gc_task; /* * The close of unix domain sockets attached as SCM_RIGHTS is * postponed to the taskqueue, to avoid arbitrary recursion depth. * The attached sockets might have another sockets attached. */ static struct task unp_defer_task; /* * Both send and receive buffers are allocated PIPSIZ bytes of buffering for * stream sockets, although the total for sender and receiver is actually * only PIPSIZ. * * Datagram sockets really use the sendspace as the maximum datagram size, * and don't really want to reserve the sendspace. Their recvspace should be * large enough for at least one max-size datagram plus address. 
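 *
 * The defaults are exported below as read/write sysctls under net.local;
 * purely as an illustration, a userland program could query or tune them
 * with something like:
 *
 *	u_long maxdgram;
 *	size_t len = sizeof(maxdgram);
 *
 *	sysctlbyname("net.local.dgram.maxdgram", &maxdgram, &len, NULL, 0);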
*/ #ifndef PIPSIZ #define PIPSIZ 8192 #endif static u_long unpst_sendspace = PIPSIZ; static u_long unpst_recvspace = PIPSIZ; static u_long unpdg_maxdgram = 2*1024; static u_long unpdg_recvspace = 16*1024; /* support 8KB syslog msgs */ static u_long unpsp_sendspace = PIPSIZ; /* really max datagram size */ static u_long unpsp_recvspace = PIPSIZ; static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Local domain"); static SYSCTL_NODE(_net_local, SOCK_STREAM, stream, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "SOCK_STREAM"); static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "SOCK_DGRAM"); static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "SOCK_SEQPACKET"); SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW, &unpst_sendspace, 0, "Default stream send space."); SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW, &unpst_recvspace, 0, "Default stream receive space."); SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW, &unpdg_maxdgram, 0, "Maximum datagram size."); SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW, &unpdg_recvspace, 0, "Default datagram receive space."); SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW, &unpsp_sendspace, 0, "Default seqpacket send space."); SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW, &unpsp_recvspace, 0, "Default seqpacket receive space."); SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, "File descriptors in flight."); SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD, &unp_defers_count, 0, "File descriptors deferred to taskqueue for close."); /* * Locking and synchronization: * * Several types of locks exist in the local domain socket implementation: * - a global linkage lock * - a global connection list lock * - the mtxpool lock * - per-unpcb mutexes * * The linkage lock protects the global socket lists, the generation number * counter and garbage collector state. * * The connection list lock protects the list of referring sockets in a datagram * socket PCB. This lock is also overloaded to protect a global list of * sockets whose buffers contain socket references in the form of SCM_RIGHTS * messages. To avoid recursion, such references are released by a dedicated * thread. * * The mtxpool lock protects the vnode from being modified while referenced. * Lock ordering rules require that it be acquired before any PCB locks. * * The unpcb lock (unp_mtx) protects the most commonly referenced fields in the * unpcb. This includes the unp_conn field, which either links two connected * PCBs together (for connected socket types) or points at the destination * socket (for connectionless socket types). The operations of creating or * destroying a connection therefore involve locking multiple PCBs. To avoid * lock order reversals, in some cases this involves dropping a PCB lock and * using a reference counter to maintain liveness. * * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer, * allocated in pr_attach() and freed in pr_detach(). The validity of that * pointer is an invariant, so no lock is required to dereference the so_pcb * pointer if a valid socket reference is held by the caller. In practice, * this is always true during operations performed on a socket. Each unpcb * has a back-pointer to its socket, unp_socket, which will be stable under * the same circumstances. 
* * This pointer may only be safely dereferenced as long as a valid reference * to the unpcb is held. Typically, this reference will be from the socket, * or from another unpcb when the referring unpcb's lock is held (in order * that the reference not be invalidated during use). For example, to follow * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee * that detach is not run clearing unp_socket. * * Blocking with UNIX domain sockets is a tricky issue: unlike most network * protocols, bind() is a non-atomic operation, and connect() requires * potential sleeping in the protocol, due to potentially waiting on local or * distributed file systems. We try to separate "lookup" operations, which * may sleep, and the IPC operations themselves, which typically can occur * with relative atomicity as locks can be held over the entire operation. * * Another tricky issue is simultaneous multi-threaded or multi-process * access to a single UNIX domain socket. These are handled by the flags * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or * binding, both of which involve dropping UNIX domain socket locks in order * to perform namei() and other file system operations. */ static struct rwlock unp_link_rwlock; static struct mtx unp_defers_lock; #define UNP_LINK_LOCK_INIT() rw_init(&unp_link_rwlock, \ "unp_link_rwlock") #define UNP_LINK_LOCK_ASSERT() rw_assert(&unp_link_rwlock, \ RA_LOCKED) #define UNP_LINK_UNLOCK_ASSERT() rw_assert(&unp_link_rwlock, \ RA_UNLOCKED) #define UNP_LINK_RLOCK() rw_rlock(&unp_link_rwlock) #define UNP_LINK_RUNLOCK() rw_runlock(&unp_link_rwlock) #define UNP_LINK_WLOCK() rw_wlock(&unp_link_rwlock) #define UNP_LINK_WUNLOCK() rw_wunlock(&unp_link_rwlock) #define UNP_LINK_WLOCK_ASSERT() rw_assert(&unp_link_rwlock, \ RA_WLOCKED) #define UNP_LINK_WOWNED() rw_wowned(&unp_link_rwlock) #define UNP_DEFERRED_LOCK_INIT() mtx_init(&unp_defers_lock, \ "unp_defer", NULL, MTX_DEF) #define UNP_DEFERRED_LOCK() mtx_lock(&unp_defers_lock) #define UNP_DEFERRED_UNLOCK() mtx_unlock(&unp_defers_lock) #define UNP_REF_LIST_LOCK() UNP_DEFERRED_LOCK(); #define UNP_REF_LIST_UNLOCK() UNP_DEFERRED_UNLOCK(); #define UNP_PCB_LOCK_INIT(unp) mtx_init(&(unp)->unp_mtx, \ "unp", "unp", \ MTX_DUPOK|MTX_DEF) #define UNP_PCB_LOCK_DESTROY(unp) mtx_destroy(&(unp)->unp_mtx) #define UNP_PCB_LOCKPTR(unp) (&(unp)->unp_mtx) #define UNP_PCB_LOCK(unp) mtx_lock(&(unp)->unp_mtx) #define UNP_PCB_TRYLOCK(unp) mtx_trylock(&(unp)->unp_mtx) #define UNP_PCB_UNLOCK(unp) mtx_unlock(&(unp)->unp_mtx) #define UNP_PCB_OWNED(unp) mtx_owned(&(unp)->unp_mtx) #define UNP_PCB_LOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_OWNED) #define UNP_PCB_UNLOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED) static int uipc_connect2(struct socket *, struct socket *); static int uipc_ctloutput(struct socket *, struct sockopt *); static int unp_connect(struct socket *, struct sockaddr *, struct thread *); static int unp_connectat(int, struct socket *, struct sockaddr *, struct thread *, bool); typedef enum { PRU_CONNECT, PRU_CONNECT2 } conn2_how; static void unp_connect2(struct socket *so, struct socket *so2, conn2_how); static void unp_disconnect(struct unpcb *unp, struct unpcb *unp2); static void unp_dispose(struct socket *so); static void unp_shutdown(struct unpcb *); static void unp_drop(struct unpcb *); static void unp_gc(__unused void *, int); static void unp_scan(struct mbuf *, void (*)(struct filedescent **, int)); static void unp_discard(struct file *); static void unp_freerights(struct 
filedescent **, int); static int unp_internalize(struct mbuf **, struct thread *, struct mbuf **, u_int *, u_int *); static void unp_internalize_fp(struct file *); static int unp_externalize(struct mbuf *, struct mbuf **, int); static int unp_externalize_fp(struct file *); static struct mbuf *unp_addsockcred(struct thread *, struct mbuf *, int, struct mbuf **, u_int *, u_int *); static void unp_process_defers(void * __unused, int); static void unp_pcb_hold(struct unpcb *unp) { u_int old __unused; old = refcount_acquire(&unp->unp_refcount); KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp)); } static __result_use_check bool unp_pcb_rele(struct unpcb *unp) { bool ret; UNP_PCB_LOCK_ASSERT(unp); if ((ret = refcount_release(&unp->unp_refcount))) { UNP_PCB_UNLOCK(unp); UNP_PCB_LOCK_DESTROY(unp); uma_zfree(unp_zone, unp); } return (ret); } static void unp_pcb_rele_notlast(struct unpcb *unp) { bool ret __unused; ret = refcount_release(&unp->unp_refcount); KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp)); } static void unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2) { UNP_PCB_UNLOCK_ASSERT(unp); UNP_PCB_UNLOCK_ASSERT(unp2); if (unp == unp2) { UNP_PCB_LOCK(unp); } else if ((uintptr_t)unp2 > (uintptr_t)unp) { UNP_PCB_LOCK(unp); UNP_PCB_LOCK(unp2); } else { UNP_PCB_LOCK(unp2); UNP_PCB_LOCK(unp); } } static void unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2) { UNP_PCB_UNLOCK(unp); if (unp != unp2) UNP_PCB_UNLOCK(unp2); } /* * Try to lock the connected peer of an already locked socket. In some cases * this requires that we unlock the current socket. The pairbusy counter is * used to block concurrent connection attempts while the lock is dropped. The * caller must be careful to revalidate PCB state. */ static struct unpcb * unp_pcb_lock_peer(struct unpcb *unp) { struct unpcb *unp2; UNP_PCB_LOCK_ASSERT(unp); unp2 = unp->unp_conn; if (unp2 == NULL) return (NULL); if (__predict_false(unp == unp2)) return (unp); UNP_PCB_UNLOCK_ASSERT(unp2); if (__predict_true(UNP_PCB_TRYLOCK(unp2))) return (unp2); if ((uintptr_t)unp2 > (uintptr_t)unp) { UNP_PCB_LOCK(unp2); return (unp2); } unp->unp_pairbusy++; unp_pcb_hold(unp2); UNP_PCB_UNLOCK(unp); UNP_PCB_LOCK(unp2); UNP_PCB_LOCK(unp); KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL, ("%s: socket %p was reconnected", __func__, unp)); if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) { unp->unp_flags &= ~UNP_WAITING; wakeup(unp); } if (unp_pcb_rele(unp2)) { /* unp2 is unlocked. */ return (NULL); } if (unp->unp_conn == NULL) { UNP_PCB_UNLOCK(unp2); return (NULL); } return (unp2); } static void uipc_abort(struct socket *so) { struct unpcb *unp, *unp2; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_abort: unp == NULL")); UNP_PCB_UNLOCK_ASSERT(unp); UNP_PCB_LOCK(unp); unp2 = unp->unp_conn; if (unp2 != NULL) { unp_pcb_hold(unp2); UNP_PCB_UNLOCK(unp); unp_drop(unp2); } else UNP_PCB_UNLOCK(unp); } -static int -uipc_accept(struct socket *so, struct sockaddr *ret) -{ - struct unpcb *unp, *unp2; - const struct sockaddr *sa; - - /* - * Pass back name of connected socket, if it was bound and we are - * still connected (our peer may have closed already!). 
- */ - unp = sotounpcb(so); - KASSERT(unp != NULL, ("uipc_accept: unp == NULL")); - - UNP_PCB_LOCK(unp); - unp2 = unp_pcb_lock_peer(unp); - if (unp2 != NULL && unp2->unp_addr != NULL) - sa = (struct sockaddr *)unp2->unp_addr; - else - sa = &sun_noname; - bcopy(sa, ret, sa->sa_len); - if (unp2 != NULL) - unp_pcb_unlock_pair(unp, unp2); - else - UNP_PCB_UNLOCK(unp); - return (0); -} - static int uipc_attach(struct socket *so, int proto, struct thread *td) { u_long sendspace, recvspace; struct unpcb *unp; int error; bool locked; KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL")); if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { switch (so->so_type) { case SOCK_STREAM: sendspace = unpst_sendspace; recvspace = unpst_recvspace; break; case SOCK_DGRAM: STAILQ_INIT(&so->so_rcv.uxdg_mb); STAILQ_INIT(&so->so_snd.uxdg_mb); TAILQ_INIT(&so->so_rcv.uxdg_conns); /* * Since send buffer is either bypassed or is a part * of one-to-many receive buffer, we assign both space * limits to unpdg_recvspace. */ sendspace = recvspace = unpdg_recvspace; break; case SOCK_SEQPACKET: sendspace = unpsp_sendspace; recvspace = unpsp_recvspace; break; default: panic("uipc_attach"); } error = soreserve(so, sendspace, recvspace); if (error) return (error); } unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO); if (unp == NULL) return (ENOBUFS); LIST_INIT(&unp->unp_refs); UNP_PCB_LOCK_INIT(unp); unp->unp_socket = so; so->so_pcb = unp; refcount_init(&unp->unp_refcount, 1); if ((locked = UNP_LINK_WOWNED()) == false) UNP_LINK_WLOCK(); unp->unp_gencnt = ++unp_gencnt; unp->unp_ino = ++unp_ino; unp_count++; switch (so->so_type) { case SOCK_STREAM: LIST_INSERT_HEAD(&unp_shead, unp, unp_link); break; case SOCK_DGRAM: LIST_INSERT_HEAD(&unp_dhead, unp, unp_link); break; case SOCK_SEQPACKET: LIST_INSERT_HEAD(&unp_sphead, unp, unp_link); break; default: panic("uipc_attach"); } if (locked == false) UNP_LINK_WUNLOCK(); return (0); } static int uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { struct sockaddr_un *soun = (struct sockaddr_un *)nam; struct vattr vattr; int error, namelen; struct nameidata nd; struct unpcb *unp; struct vnode *vp; struct mount *mp; cap_rights_t rights; char *buf; if (nam->sa_family != AF_UNIX) return (EAFNOSUPPORT); unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_bind: unp == NULL")); if (soun->sun_len > sizeof(struct sockaddr_un)) return (EINVAL); namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path); if (namelen <= 0) return (EINVAL); /* * We don't allow simultaneous bind() calls on a single UNIX domain * socket, so flag in-progress operations, and return an error if an * operation is already in progress. * * Historically, we have not allowed a socket to be rebound, so this * also returns an error. Not allowing re-binding simplifies the * implementation and avoids a great many possible failure modes. 
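 *
 * Seen from userland, a second bind(2) on the same socket therefore fails
 * with EINVAL, and binding to a path that already exists fails with
 * EADDRINUSE; a server typically removes a stale path first (illustrative
 * sketch only):
 *
 *	unlink(path);
 *	bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun));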
*/ UNP_PCB_LOCK(unp); if (unp->unp_vnode != NULL) { UNP_PCB_UNLOCK(unp); return (EINVAL); } if (unp->unp_flags & UNP_BINDING) { UNP_PCB_UNLOCK(unp); return (EALREADY); } unp->unp_flags |= UNP_BINDING; UNP_PCB_UNLOCK(unp); buf = malloc(namelen + 1, M_TEMP, M_WAITOK); bcopy(soun->sun_path, buf, namelen); buf[namelen] = 0; restart: NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | NOCACHE, UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT)); /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */ error = namei(&nd); if (error) goto error; vp = nd.ni_vp; if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) { NDFREE_PNBUF(&nd); if (nd.ni_dvp == vp) vrele(nd.ni_dvp); else vput(nd.ni_dvp); if (vp != NULL) { vrele(vp); error = EADDRINUSE; goto error; } error = vn_start_write(NULL, &mp, V_XSLEEP | V_PCATCH); if (error) goto error; goto restart; } VATTR_NULL(&vattr); vattr.va_type = VSOCK; vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_pd->pd_cmask); #ifdef MAC error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd, &vattr); #endif if (error == 0) error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); NDFREE_PNBUF(&nd); if (error) { VOP_VPUT_PAIR(nd.ni_dvp, NULL, true); vn_finished_write(mp); if (error == ERELOOKUP) goto restart; goto error; } vp = nd.ni_vp; ASSERT_VOP_ELOCKED(vp, "uipc_bind"); soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK); UNP_PCB_LOCK(unp); VOP_UNP_BIND(vp, unp); unp->unp_vnode = vp; unp->unp_addr = soun; unp->unp_flags &= ~UNP_BINDING; UNP_PCB_UNLOCK(unp); vref(vp); VOP_VPUT_PAIR(nd.ni_dvp, &vp, true); vn_finished_write(mp); free(buf, M_TEMP); return (0); error: UNP_PCB_LOCK(unp); unp->unp_flags &= ~UNP_BINDING; UNP_PCB_UNLOCK(unp); free(buf, M_TEMP); return (error); } static int uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { return (uipc_bindat(AT_FDCWD, so, nam, td)); } static int uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { int error; KASSERT(td == curthread, ("uipc_connect: td != curthread")); error = unp_connect(so, nam, td); return (error); } static int uipc_connectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { int error; KASSERT(td == curthread, ("uipc_connectat: td != curthread")); error = unp_connectat(fd, so, nam, td, false); return (error); } static void uipc_close(struct socket *so) { struct unpcb *unp, *unp2; struct vnode *vp = NULL; struct mtx *vplock; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_close: unp == NULL")); vplock = NULL; if ((vp = unp->unp_vnode) != NULL) { vplock = mtx_pool_find(mtxpool_sleep, vp); mtx_lock(vplock); } UNP_PCB_LOCK(unp); if (vp && unp->unp_vnode == NULL) { mtx_unlock(vplock); vp = NULL; } if (vp != NULL) { VOP_UNP_DETACH(vp); unp->unp_vnode = NULL; } if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) unp_disconnect(unp, unp2); else UNP_PCB_UNLOCK(unp); if (vp) { mtx_unlock(vplock); vrele(vp); } } static int uipc_connect2(struct socket *so1, struct socket *so2) { struct unpcb *unp, *unp2; if (so1->so_type != so2->so_type) return (EPROTOTYPE); unp = so1->so_pcb; KASSERT(unp != NULL, ("uipc_connect2: unp == NULL")); unp2 = so2->so_pcb; KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL")); unp_pcb_lock_pair(unp, unp2); unp_connect2(so1, so2, PRU_CONNECT2); unp_pcb_unlock_pair(unp, unp2); return (0); } static void uipc_detach(struct socket *so) { struct unpcb *unp, *unp2; struct mtx *vplock; struct vnode *vp; int local_unp_rights; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_detach: unp == 
NULL")); vp = NULL; vplock = NULL; UNP_LINK_WLOCK(); LIST_REMOVE(unp, unp_link); if (unp->unp_gcflag & UNPGC_DEAD) LIST_REMOVE(unp, unp_dead); unp->unp_gencnt = ++unp_gencnt; --unp_count; UNP_LINK_WUNLOCK(); UNP_PCB_UNLOCK_ASSERT(unp); restart: if ((vp = unp->unp_vnode) != NULL) { vplock = mtx_pool_find(mtxpool_sleep, vp); mtx_lock(vplock); } UNP_PCB_LOCK(unp); if (unp->unp_vnode != vp && unp->unp_vnode != NULL) { if (vplock) mtx_unlock(vplock); UNP_PCB_UNLOCK(unp); goto restart; } if ((vp = unp->unp_vnode) != NULL) { VOP_UNP_DETACH(vp); unp->unp_vnode = NULL; } if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) unp_disconnect(unp, unp2); else UNP_PCB_UNLOCK(unp); UNP_REF_LIST_LOCK(); while (!LIST_EMPTY(&unp->unp_refs)) { struct unpcb *ref = LIST_FIRST(&unp->unp_refs); unp_pcb_hold(ref); UNP_REF_LIST_UNLOCK(); MPASS(ref != unp); UNP_PCB_UNLOCK_ASSERT(ref); unp_drop(ref); UNP_REF_LIST_LOCK(); } UNP_REF_LIST_UNLOCK(); UNP_PCB_LOCK(unp); local_unp_rights = unp_rights; unp->unp_socket->so_pcb = NULL; unp->unp_socket = NULL; free(unp->unp_addr, M_SONAME); unp->unp_addr = NULL; if (!unp_pcb_rele(unp)) UNP_PCB_UNLOCK(unp); if (vp) { mtx_unlock(vplock); vrele(vp); } if (local_unp_rights) taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1); switch (so->so_type) { case SOCK_DGRAM: /* * Everything should have been unlinked/freed by unp_dispose() * and/or unp_disconnect(). */ MPASS(so->so_rcv.uxdg_peeked == NULL); MPASS(STAILQ_EMPTY(&so->so_rcv.uxdg_mb)); MPASS(TAILQ_EMPTY(&so->so_rcv.uxdg_conns)); MPASS(STAILQ_EMPTY(&so->so_snd.uxdg_mb)); } } static int uipc_disconnect(struct socket *so) { struct unpcb *unp, *unp2; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL")); UNP_PCB_LOCK(unp); if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) unp_disconnect(unp, unp2); else UNP_PCB_UNLOCK(unp); return (0); } static int uipc_listen(struct socket *so, int backlog, struct thread *td) { struct unpcb *unp; int error; MPASS(so->so_type != SOCK_DGRAM); /* * Synchronize with concurrent connection attempts. 
*/ error = 0; unp = sotounpcb(so); UNP_PCB_LOCK(unp); if (unp->unp_conn != NULL || (unp->unp_flags & UNP_CONNECTING) != 0) error = EINVAL; else if (unp->unp_vnode == NULL) error = EDESTADDRREQ; if (error != 0) { UNP_PCB_UNLOCK(unp); return (error); } SOCK_LOCK(so); error = solisten_proto_check(so); if (error == 0) { cru2xt(td, &unp->unp_peercred); solisten_proto(so, backlog); } SOCK_UNLOCK(so); UNP_PCB_UNLOCK(unp); return (error); } static int -uipc_peeraddr(struct socket *so, struct sockaddr **nam) +uipc_peeraddr(struct socket *so, struct sockaddr *ret) { struct unpcb *unp, *unp2; const struct sockaddr *sa; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL")); - *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); - UNP_PCB_LOCK(unp); unp2 = unp_pcb_lock_peer(unp); if (unp2 != NULL) { if (unp2->unp_addr != NULL) sa = (struct sockaddr *)unp2->unp_addr; else sa = &sun_noname; - bcopy(sa, *nam, sa->sa_len); + bcopy(sa, ret, sa->sa_len); unp_pcb_unlock_pair(unp, unp2); } else { - sa = &sun_noname; - bcopy(sa, *nam, sa->sa_len); UNP_PCB_UNLOCK(unp); + sa = &sun_noname; + bcopy(sa, ret, sa->sa_len); } return (0); } static int uipc_rcvd(struct socket *so, int flags) { struct unpcb *unp, *unp2; struct socket *so2; u_int mbcnt, sbcc; unp = sotounpcb(so); KASSERT(unp != NULL, ("%s: unp == NULL", __func__)); KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET, ("%s: socktype %d", __func__, so->so_type)); /* * Adjust backpressure on sender and wakeup any waiting to write. * * The unp lock is acquired to maintain the validity of the unp_conn * pointer; no lock on unp2 is required as unp2->unp_socket will be * static as long as we don't permit unp2 to disconnect from unp, * which is prevented by the lock on unp. We cache values from * so_rcv to avoid holding the so_rcv lock over the entire * transaction on the remote so_snd. */ SOCKBUF_LOCK(&so->so_rcv); mbcnt = so->so_rcv.sb_mbcnt; sbcc = sbavail(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); /* * There is a benign race condition at this point. If we're planning to * clear SB_STOP, but uipc_send is called on the connected socket at * this instant, it might add data to the sockbuf and set SB_STOP. Then * we would erroneously clear SB_STOP below, even though the sockbuf is * full. The race is benign because the only ill effect is to allow the * sockbuf to exceed its size limit, and the size limits are not * strictly guaranteed anyway. 
*/ UNP_PCB_LOCK(unp); unp2 = unp->unp_conn; if (unp2 == NULL) { UNP_PCB_UNLOCK(unp); return (0); } so2 = unp2->unp_socket; SOCKBUF_LOCK(&so2->so_snd); if (sbcc < so2->so_snd.sb_hiwat && mbcnt < so2->so_snd.sb_mbmax) so2->so_snd.sb_flags &= ~SB_STOP; sowwakeup_locked(so2); UNP_PCB_UNLOCK(unp); return (0); } static int uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { struct unpcb *unp, *unp2; struct socket *so2; u_int mbcnt, sbcc; int error; unp = sotounpcb(so); KASSERT(unp != NULL, ("%s: unp == NULL", __func__)); KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET, ("%s: socktype %d", __func__, so->so_type)); error = 0; if (flags & PRUS_OOB) { error = EOPNOTSUPP; goto release; } if (control != NULL && (error = unp_internalize(&control, td, NULL, NULL, NULL))) goto release; unp2 = NULL; if ((so->so_state & SS_ISCONNECTED) == 0) { if (nam != NULL) { if ((error = unp_connect(so, nam, td)) != 0) goto out; } else { error = ENOTCONN; goto out; } } UNP_PCB_LOCK(unp); if ((unp2 = unp_pcb_lock_peer(unp)) == NULL) { UNP_PCB_UNLOCK(unp); error = ENOTCONN; goto out; } else if (so->so_snd.sb_state & SBS_CANTSENDMORE) { unp_pcb_unlock_pair(unp, unp2); error = EPIPE; goto out; } UNP_PCB_UNLOCK(unp); if ((so2 = unp2->unp_socket) == NULL) { UNP_PCB_UNLOCK(unp2); error = ENOTCONN; goto out; } SOCKBUF_LOCK(&so2->so_rcv); if (unp2->unp_flags & UNP_WANTCRED_MASK) { /* * Credentials are passed only once on SOCK_STREAM and * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS). */ control = unp_addsockcred(td, control, unp2->unp_flags, NULL, NULL, NULL); unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT; } /* * Send to paired receive port and wake up readers. Don't * check for space available in the receive buffer if we're * attaching ancillary data; Unix domain sockets only check * for space in the sending sockbuf, and that check is * performed one level up the stack. At that level we cannot * precisely account for the amount of buffer space used * (e.g., because control messages are not yet internalized). */ switch (so->so_type) { case SOCK_STREAM: if (control != NULL) { sbappendcontrol_locked(&so2->so_rcv, m, control, flags); control = NULL; } else sbappend_locked(&so2->so_rcv, m, flags); break; case SOCK_SEQPACKET: if (sbappendaddr_nospacecheck_locked(&so2->so_rcv, &sun_noname, m, control)) control = NULL; break; } mbcnt = so2->so_rcv.sb_mbcnt; sbcc = sbavail(&so2->so_rcv); if (sbcc) sorwakeup_locked(so2); else SOCKBUF_UNLOCK(&so2->so_rcv); /* * The PCB lock on unp2 protects the SB_STOP flag. Without it, * it would be possible for uipc_rcvd to be called at this * point, drain the receiving sockbuf, clear SB_STOP, and then * we would set SB_STOP below. That could lead to an empty * sockbuf having SB_STOP set */ SOCKBUF_LOCK(&so->so_snd); if (sbcc >= so->so_snd.sb_hiwat || mbcnt >= so->so_snd.sb_mbmax) so->so_snd.sb_flags |= SB_STOP; SOCKBUF_UNLOCK(&so->so_snd); UNP_PCB_UNLOCK(unp2); m = NULL; out: /* * PRUS_EOF is equivalent to pr_send followed by pr_shutdown. */ if (flags & PRUS_EOF) { UNP_PCB_LOCK(unp); socantsendmore(so); unp_shutdown(unp); UNP_PCB_UNLOCK(unp); } if (control != NULL && error != 0) unp_scan(control, unp_freerights); release: if (control != NULL) m_freem(control); /* * In case of PRUS_NOTREADY, uipc_ready() is responsible * for freeing memory. 
*/ if (m != NULL && (flags & PRUS_NOTREADY) == 0) m_freem(m); return (error); } /* PF_UNIX/SOCK_DGRAM version of sbspace() */ static inline bool uipc_dgram_sbspace(struct sockbuf *sb, u_int cc, u_int mbcnt) { u_int bleft, mleft; /* * Negative space may happen if send(2) is followed by * setsockopt(SO_SNDBUF/SO_RCVBUF) that shrinks maximum. */ if (__predict_false(sb->sb_hiwat < sb->uxdg_cc || sb->sb_mbmax < sb->uxdg_mbcnt)) return (false); if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) return (false); bleft = sb->sb_hiwat - sb->uxdg_cc; mleft = sb->sb_mbmax - sb->uxdg_mbcnt; return (bleft >= cc && mleft >= mbcnt); } /* * PF_UNIX/SOCK_DGRAM send * * Allocate a record consisting of 3 mbufs in the sequence of * from -> control -> data and append it to the socket buffer. * * The first mbuf carries sender's name and is a pkthdr that stores * overall length of datagram, its memory consumption and control length. */ #define ctllen PH_loc.thirtytwo[1] _Static_assert(offsetof(struct pkthdr, memlen) + sizeof(u_int) <= offsetof(struct pkthdr, ctllen), "unix/dgram can not store ctllen"); static int uipc_sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *m, struct mbuf *c, int flags, struct thread *td) { struct unpcb *unp, *unp2; const struct sockaddr *from; struct socket *so2; struct sockbuf *sb; struct mbuf *f, *clast; u_int cc, ctl, mbcnt; u_int dcc __diagused, dctl __diagused, dmbcnt __diagused; int error; MPASS((uio != NULL && m == NULL) || (m != NULL && uio == NULL)); error = 0; f = NULL; ctl = 0; if (__predict_false(flags & MSG_OOB)) { error = EOPNOTSUPP; goto out; } if (m == NULL) { if (__predict_false(uio->uio_resid > unpdg_maxdgram)) { error = EMSGSIZE; goto out; } m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR); if (__predict_false(m == NULL)) { error = EFAULT; goto out; } f = m_gethdr(M_WAITOK, MT_SONAME); cc = m->m_pkthdr.len; mbcnt = MSIZE + m->m_pkthdr.memlen; if (c != NULL && (error = unp_internalize(&c, td, &clast, &ctl, &mbcnt))) goto out; } else { /* pr_sosend() with mbuf usually is a kernel thread. */ M_ASSERTPKTHDR(m); if (__predict_false(c != NULL)) panic("%s: control from a kernel thread", __func__); if (__predict_false(m->m_pkthdr.len > unpdg_maxdgram)) { error = EMSGSIZE; goto out; } if ((f = m_gethdr(M_NOWAIT, MT_SONAME)) == NULL) { error = ENOBUFS; goto out; } /* Condition the foreign mbuf to our standards. */ m_clrprotoflags(m); m_tag_delete_chain(m, NULL); m->m_pkthdr.rcvif = NULL; m->m_pkthdr.flowid = 0; m->m_pkthdr.csum_flags = 0; m->m_pkthdr.fibnum = 0; m->m_pkthdr.rsstype = 0; cc = m->m_pkthdr.len; mbcnt = MSIZE; for (struct mbuf *mb = m; mb != NULL; mb = mb->m_next) { mbcnt += MSIZE; if (mb->m_flags & M_EXT) mbcnt += mb->m_ext.ext_size; } } unp = sotounpcb(so); MPASS(unp); /* * XXXGL: would be cool to fully remove so_snd out of the equation * and avoid this lock, which is not only extraneous, but also being * released, thus still leaving possibility for a race. We can easily * handle SBS_CANTSENDMORE/SS_ISCONNECTED complement in unpcb, but it * is more difficult to invent something to handle so_error. 
*/ error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags)); if (error) goto out2; SOCK_SENDBUF_LOCK(so); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { SOCK_SENDBUF_UNLOCK(so); error = EPIPE; goto out3; } if (so->so_error != 0) { error = so->so_error; so->so_error = 0; SOCK_SENDBUF_UNLOCK(so); goto out3; } if (((so->so_state & SS_ISCONNECTED) == 0) && addr == NULL) { SOCK_SENDBUF_UNLOCK(so); error = EDESTADDRREQ; goto out3; } SOCK_SENDBUF_UNLOCK(so); if (addr != NULL) { if ((error = unp_connectat(AT_FDCWD, so, addr, td, true))) goto out3; UNP_PCB_LOCK_ASSERT(unp); unp2 = unp->unp_conn; UNP_PCB_LOCK_ASSERT(unp2); } else { UNP_PCB_LOCK(unp); unp2 = unp_pcb_lock_peer(unp); if (unp2 == NULL) { UNP_PCB_UNLOCK(unp); error = ENOTCONN; goto out3; } } if (unp2->unp_flags & UNP_WANTCRED_MASK) c = unp_addsockcred(td, c, unp2->unp_flags, &clast, &ctl, &mbcnt); if (unp->unp_addr != NULL) from = (struct sockaddr *)unp->unp_addr; else from = &sun_noname; f->m_len = from->sa_len; MPASS(from->sa_len <= MLEN); bcopy(from, mtod(f, void *), from->sa_len); ctl += f->m_len; /* * Concatenate mbufs: from -> control -> data. * Save overall cc and mbcnt in "from" mbuf. */ if (c != NULL) { #ifdef INVARIANTS struct mbuf *mc; for (mc = c; mc->m_next != NULL; mc = mc->m_next); MPASS(mc == clast); #endif f->m_next = c; clast->m_next = m; c = NULL; } else f->m_next = m; m = NULL; #ifdef INVARIANTS dcc = dctl = dmbcnt = 0; for (struct mbuf *mb = f; mb != NULL; mb = mb->m_next) { if (mb->m_type == MT_DATA) dcc += mb->m_len; else dctl += mb->m_len; dmbcnt += MSIZE; if (mb->m_flags & M_EXT) dmbcnt += mb->m_ext.ext_size; } MPASS(dcc == cc); MPASS(dctl == ctl); MPASS(dmbcnt == mbcnt); #endif f->m_pkthdr.len = cc + ctl; f->m_pkthdr.memlen = mbcnt; f->m_pkthdr.ctllen = ctl; /* * Destination socket buffer selection. * * Unconnected sends, when !(so->so_state & SS_ISCONNECTED) and the * destination address is supplied, create a temporary connection for * the run time of the function (see call to unp_connectat() above and * to unp_disconnect() below). We distinguish them by condition of * (addr != NULL). We intentionally avoid adding 'bool connected' for * that condition, since, again, through the run time of this code we * are always connected. For such "unconnected" sends, the destination * buffer would be the receive buffer of destination socket so2. * * For connected sends, data lands on the send buffer of the sender's * socket "so". Then, if we just added the very first datagram * on this send buffer, we need to add the send buffer on to the * receiving socket's buffer list. We put ourselves on top of the * list. Such logic gives infrequent senders priority over frequent * senders. * * Note on byte count management. As long as event methods kevent(2), * select(2) are not protocol specific (yet), we need to maintain * meaningful values on the receive buffer. So, the receive buffer * would accumulate counters from all connected buffers potentially * having sb_ccc > sb_hiwat or sb_mbcnt > sb_mbmax. */ so2 = unp2->unp_socket; sb = (addr == NULL) ? 
&so->so_snd : &so2->so_rcv; SOCK_RECVBUF_LOCK(so2); if (uipc_dgram_sbspace(sb, cc + ctl, mbcnt)) { if (addr == NULL && STAILQ_EMPTY(&sb->uxdg_mb)) TAILQ_INSERT_HEAD(&so2->so_rcv.uxdg_conns, &so->so_snd, uxdg_clist); STAILQ_INSERT_TAIL(&sb->uxdg_mb, f, m_stailqpkt); sb->uxdg_cc += cc + ctl; sb->uxdg_ctl += ctl; sb->uxdg_mbcnt += mbcnt; so2->so_rcv.sb_acc += cc + ctl; so2->so_rcv.sb_ccc += cc + ctl; so2->so_rcv.sb_ctl += ctl; so2->so_rcv.sb_mbcnt += mbcnt; sorwakeup_locked(so2); f = NULL; } else { soroverflow_locked(so2); error = ENOBUFS; if (f->m_next->m_type == MT_CONTROL) { c = f->m_next; f->m_next = NULL; } } if (addr != NULL) unp_disconnect(unp, unp2); else unp_pcb_unlock_pair(unp, unp2); td->td_ru.ru_msgsnd++; out3: SOCK_IO_SEND_UNLOCK(so); out2: if (c) unp_scan(c, unp_freerights); out: if (f) m_freem(f); if (c) m_freem(c); if (m) m_freem(m); return (error); } /* * PF_UNIX/SOCK_DGRAM receive with MSG_PEEK. * The mbuf has already been unlinked from the uxdg_mb of socket buffer * and needs to be linked onto uxdg_peeked of receive socket buffer. */ static int uipc_peek_dgram(struct socket *so, struct mbuf *m, struct sockaddr **psa, struct uio *uio, struct mbuf **controlp, int *flagsp) { ssize_t len = 0; int error; so->so_rcv.uxdg_peeked = m; so->so_rcv.uxdg_cc += m->m_pkthdr.len; so->so_rcv.uxdg_ctl += m->m_pkthdr.ctllen; so->so_rcv.uxdg_mbcnt += m->m_pkthdr.memlen; SOCK_RECVBUF_UNLOCK(so); KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); if (psa != NULL) *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK); m = m->m_next; KASSERT(m, ("%s: no data or control after soname", __func__)); /* * With MSG_PEEK the control isn't executed, just copied. */ while (m != NULL && m->m_type == MT_CONTROL) { if (controlp != NULL) { *controlp = m_copym(m, 0, m->m_len, M_WAITOK); controlp = &(*controlp)->m_next; } m = m->m_next; } KASSERT(m == NULL || m->m_type == MT_DATA, ("%s: not MT_DATA mbuf %p", __func__, m)); while (m != NULL && uio->uio_resid > 0) { len = uio->uio_resid; if (len > m->m_len) len = m->m_len; error = uiomove(mtod(m, char *), (int)len, uio); if (error) { SOCK_IO_RECV_UNLOCK(so); return (error); } if (len == m->m_len) m = m->m_next; } SOCK_IO_RECV_UNLOCK(so); if (flagsp != NULL) { if (m != NULL) { if (*flagsp & MSG_TRUNC) { /* Report real length of the packet */ uio->uio_resid -= m_length(m, NULL) - len; } *flagsp |= MSG_TRUNC; } else *flagsp &= ~MSG_TRUNC; } return (0); } /* * PF_UNIX/SOCK_DGRAM receive */ static int uipc_soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { struct sockbuf *sb = NULL; struct mbuf *m; int flags, error; ssize_t len = 0; bool nonblock; MPASS(mp0 == NULL); if (psa != NULL) *psa = NULL; if (controlp != NULL) *controlp = NULL; flags = flagsp != NULL ? *flagsp : 0; nonblock = (so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT | MSG_NBIO)); error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); if (__predict_false(error)) return (error); /* * Loop blocking while waiting for a datagram. Prioritize connected * peers over unconnected sends. Set sb to selected socket buffer * containing an mbuf on exit from the wait loop. A datagram that * had already been peeked at has top priority. 
*/ SOCK_RECVBUF_LOCK(so); while ((m = so->so_rcv.uxdg_peeked) == NULL && (sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) == NULL && (m = STAILQ_FIRST(&so->so_rcv.uxdg_mb)) == NULL) { if (so->so_error) { error = so->so_error; so->so_error = 0; SOCK_RECVBUF_UNLOCK(so); SOCK_IO_RECV_UNLOCK(so); return (error); } if (so->so_rcv.sb_state & SBS_CANTRCVMORE || uio->uio_resid == 0) { SOCK_RECVBUF_UNLOCK(so); SOCK_IO_RECV_UNLOCK(so); return (0); } if (nonblock) { SOCK_RECVBUF_UNLOCK(so); SOCK_IO_RECV_UNLOCK(so); return (EWOULDBLOCK); } error = sbwait(so, SO_RCV); if (error) { SOCK_RECVBUF_UNLOCK(so); SOCK_IO_RECV_UNLOCK(so); return (error); } } if (sb == NULL) sb = &so->so_rcv; else if (m == NULL) m = STAILQ_FIRST(&sb->uxdg_mb); else MPASS(m == so->so_rcv.uxdg_peeked); MPASS(sb->uxdg_cc > 0); M_ASSERTPKTHDR(m); KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; if (__predict_true(m != so->so_rcv.uxdg_peeked)) { STAILQ_REMOVE_HEAD(&sb->uxdg_mb, m_stailqpkt); if (STAILQ_EMPTY(&sb->uxdg_mb) && sb != &so->so_rcv) TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist); } else so->so_rcv.uxdg_peeked = NULL; sb->uxdg_cc -= m->m_pkthdr.len; sb->uxdg_ctl -= m->m_pkthdr.ctllen; sb->uxdg_mbcnt -= m->m_pkthdr.memlen; if (__predict_false(flags & MSG_PEEK)) return (uipc_peek_dgram(so, m, psa, uio, controlp, flagsp)); so->so_rcv.sb_acc -= m->m_pkthdr.len; so->so_rcv.sb_ccc -= m->m_pkthdr.len; so->so_rcv.sb_ctl -= m->m_pkthdr.ctllen; so->so_rcv.sb_mbcnt -= m->m_pkthdr.memlen; SOCK_RECVBUF_UNLOCK(so); if (psa != NULL) *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK); m = m_free(m); KASSERT(m, ("%s: no data or control after soname", __func__)); /* * Packet to copyout() is now in 'm' and it is disconnected from the * queue. * * Process one or more MT_CONTROL mbufs present before any data mbufs * in the first mbuf chain on the socket buffer. We call into the * unp_externalize() to perform externalization (or freeing if * controlp == NULL). In some cases there can be only MT_CONTROL mbufs * without MT_DATA mbufs. */ while (m != NULL && m->m_type == MT_CONTROL) { struct mbuf *cm; /* XXXGL: unp_externalize() is also dom_externalize() KBI and * it frees whole chain, so we must disconnect the mbuf. 
*/ cm = m; m = m->m_next; cm->m_next = NULL; error = unp_externalize(cm, controlp, flags); if (error != 0) { SOCK_IO_RECV_UNLOCK(so); unp_scan(m, unp_freerights); m_freem(m); return (error); } if (controlp != NULL) { while (*controlp != NULL) controlp = &(*controlp)->m_next; } } KASSERT(m == NULL || m->m_type == MT_DATA, ("%s: not MT_DATA mbuf %p", __func__, m)); while (m != NULL && uio->uio_resid > 0) { len = uio->uio_resid; if (len > m->m_len) len = m->m_len; error = uiomove(mtod(m, char *), (int)len, uio); if (error) { SOCK_IO_RECV_UNLOCK(so); m_freem(m); return (error); } if (len == m->m_len) m = m_free(m); else { m->m_data += len; m->m_len -= len; } } SOCK_IO_RECV_UNLOCK(so); if (m != NULL) { if (flagsp != NULL) { if (flags & MSG_TRUNC) { /* Report real length of the packet */ uio->uio_resid -= m_length(m, NULL); } *flagsp |= MSG_TRUNC; } m_freem(m); } else if (flagsp != NULL) *flagsp &= ~MSG_TRUNC; return (0); } static bool uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp) { struct mbuf *mb, *n; struct sockbuf *sb; SOCK_LOCK(so); if (SOLISTENING(so)) { SOCK_UNLOCK(so); return (false); } mb = NULL; sb = &so->so_rcv; SOCKBUF_LOCK(sb); if (sb->sb_fnrdy != NULL) { for (mb = sb->sb_mb, n = mb->m_nextpkt; mb != NULL;) { if (mb == m) { *errorp = sbready(sb, m, count); break; } mb = mb->m_next; if (mb == NULL) { mb = n; if (mb != NULL) n = mb->m_nextpkt; } } } SOCKBUF_UNLOCK(sb); SOCK_UNLOCK(so); return (mb != NULL); } static int uipc_ready(struct socket *so, struct mbuf *m, int count) { struct unpcb *unp, *unp2; struct socket *so2; int error, i; unp = sotounpcb(so); KASSERT(so->so_type == SOCK_STREAM, ("%s: unexpected socket type for %p", __func__, so)); UNP_PCB_LOCK(unp); if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) { UNP_PCB_UNLOCK(unp); so2 = unp2->unp_socket; SOCKBUF_LOCK(&so2->so_rcv); if ((error = sbready(&so2->so_rcv, m, count)) == 0) sorwakeup_locked(so2); else SOCKBUF_UNLOCK(&so2->so_rcv); UNP_PCB_UNLOCK(unp2); return (error); } UNP_PCB_UNLOCK(unp); /* * The receiving socket has been disconnected, but may still be valid. * In this case, the now-ready mbufs are still present in its socket * buffer, so perform an exhaustive search before giving up and freeing * the mbufs. 
*/ UNP_LINK_RLOCK(); LIST_FOREACH(unp, &unp_shead, unp_link) { if (uipc_ready_scan(unp->unp_socket, m, count, &error)) break; } UNP_LINK_RUNLOCK(); if (unp == NULL) { for (i = 0; i < count; i++) m = m_free(m); error = ECONNRESET; } return (error); } static int uipc_sense(struct socket *so, struct stat *sb) { struct unpcb *unp; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_sense: unp == NULL")); sb->st_blksize = so->so_snd.sb_hiwat; sb->st_dev = NODEV; sb->st_ino = unp->unp_ino; return (0); } static int uipc_shutdown(struct socket *so) { struct unpcb *unp; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_shutdown: unp == NULL")); UNP_PCB_LOCK(unp); socantsendmore(so); unp_shutdown(unp); UNP_PCB_UNLOCK(unp); return (0); } static int -uipc_sockaddr(struct socket *so, struct sockaddr **nam) +uipc_sockaddr(struct socket *so, struct sockaddr *ret) { struct unpcb *unp; const struct sockaddr *sa; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL")); - *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); UNP_PCB_LOCK(unp); if (unp->unp_addr != NULL) sa = (struct sockaddr *) unp->unp_addr; else sa = &sun_noname; - bcopy(sa, *nam, sa->sa_len); + bcopy(sa, ret, sa->sa_len); UNP_PCB_UNLOCK(unp); return (0); } static int uipc_ctloutput(struct socket *so, struct sockopt *sopt) { struct unpcb *unp; struct xucred xu; int error, optval; if (sopt->sopt_level != SOL_LOCAL) return (EINVAL); unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL")); error = 0; switch (sopt->sopt_dir) { case SOPT_GET: switch (sopt->sopt_name) { case LOCAL_PEERCRED: UNP_PCB_LOCK(unp); if (unp->unp_flags & UNP_HAVEPC) xu = unp->unp_peercred; else { if (so->so_type == SOCK_STREAM) error = ENOTCONN; else error = EINVAL; } UNP_PCB_UNLOCK(unp); if (error == 0) error = sooptcopyout(sopt, &xu, sizeof(xu)); break; case LOCAL_CREDS: /* Unlocked read. */ optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0; error = sooptcopyout(sopt, &optval, sizeof(optval)); break; case LOCAL_CREDS_PERSISTENT: /* Unlocked read. */ optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 1 : 0; error = sooptcopyout(sopt, &optval, sizeof(optval)); break; case LOCAL_CONNWAIT: /* Unlocked read. */ optval = unp->unp_flags & UNP_CONNWAIT ? 
1 : 0; error = sooptcopyout(sopt, &optval, sizeof(optval)); break; default: error = EOPNOTSUPP; break; } break; case SOPT_SET: switch (sopt->sopt_name) { case LOCAL_CREDS: case LOCAL_CREDS_PERSISTENT: case LOCAL_CONNWAIT: error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); if (error) break; #define OPTSET(bit, exclusive) do { \ UNP_PCB_LOCK(unp); \ if (optval) { \ if ((unp->unp_flags & (exclusive)) != 0) { \ UNP_PCB_UNLOCK(unp); \ error = EINVAL; \ break; \ } \ unp->unp_flags |= (bit); \ } else \ unp->unp_flags &= ~(bit); \ UNP_PCB_UNLOCK(unp); \ } while (0) switch (sopt->sopt_name) { case LOCAL_CREDS: OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS); break; case LOCAL_CREDS_PERSISTENT: OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT); break; case LOCAL_CONNWAIT: OPTSET(UNP_CONNWAIT, 0); break; default: break; } break; #undef OPTSET default: error = ENOPROTOOPT; break; } break; default: error = EOPNOTSUPP; break; } return (error); } static int unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { return (unp_connectat(AT_FDCWD, so, nam, td, false)); } static int unp_connectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td, bool return_locked) { struct mtx *vplock; struct sockaddr_un *soun; struct vnode *vp; struct socket *so2; struct unpcb *unp, *unp2, *unp3; struct nameidata nd; char buf[SOCK_MAXADDRLEN]; struct sockaddr *sa; cap_rights_t rights; int error, len; bool connreq; if (nam->sa_family != AF_UNIX) return (EAFNOSUPPORT); if (nam->sa_len > sizeof(struct sockaddr_un)) return (EINVAL); len = nam->sa_len - offsetof(struct sockaddr_un, sun_path); if (len <= 0) return (EINVAL); soun = (struct sockaddr_un *)nam; bcopy(soun->sun_path, buf, len); buf[len] = 0; error = 0; unp = sotounpcb(so); UNP_PCB_LOCK(unp); for (;;) { /* * Wait for connection state to stabilize. If a connection * already exists, give up. For datagram sockets, which permit * multiple consecutive connect(2) calls, upper layers are * responsible for disconnecting in advance of a subsequent * connect(2), but this is not synchronized with PCB connection * state. * * Also make sure that no threads are currently attempting to * lock the peer socket, to ensure that unp_conn cannot * transition between two valid sockets while locks are dropped. 
*/ if (SOLISTENING(so)) error = EOPNOTSUPP; else if (unp->unp_conn != NULL) error = EISCONN; else if ((unp->unp_flags & UNP_CONNECTING) != 0) { error = EALREADY; } if (error != 0) { UNP_PCB_UNLOCK(unp); return (error); } if (unp->unp_pairbusy > 0) { unp->unp_flags |= UNP_WAITING; mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0); continue; } break; } unp->unp_flags |= UNP_CONNECTING; UNP_PCB_UNLOCK(unp); connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0; if (connreq) sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); else sa = NULL; NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT)); error = namei(&nd); if (error) vp = NULL; else vp = nd.ni_vp; ASSERT_VOP_LOCKED(vp, "unp_connect"); if (error) goto bad; NDFREE_PNBUF(&nd); if (vp->v_type != VSOCK) { error = ENOTSOCK; goto bad; } #ifdef MAC error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD); if (error) goto bad; #endif error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td); if (error) goto bad; unp = sotounpcb(so); KASSERT(unp != NULL, ("unp_connect: unp == NULL")); vplock = mtx_pool_find(mtxpool_sleep, vp); mtx_lock(vplock); VOP_UNP_CONNECT(vp, &unp2); if (unp2 == NULL) { error = ECONNREFUSED; goto bad2; } so2 = unp2->unp_socket; if (so->so_type != so2->so_type) { error = EPROTOTYPE; goto bad2; } if (connreq) { if (SOLISTENING(so2)) { CURVNET_SET(so2->so_vnet); so2 = sonewconn(so2, 0); CURVNET_RESTORE(); } else so2 = NULL; if (so2 == NULL) { error = ECONNREFUSED; goto bad2; } unp3 = sotounpcb(so2); unp_pcb_lock_pair(unp2, unp3); if (unp2->unp_addr != NULL) { bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len); unp3->unp_addr = (struct sockaddr_un *) sa; sa = NULL; } unp_copy_peercred(td, unp3, unp, unp2); UNP_PCB_UNLOCK(unp2); unp2 = unp3; /* * It is safe to block on the PCB lock here since unp2 is * nascent and cannot be connected to any other sockets. */ UNP_PCB_LOCK(unp); #ifdef MAC mac_socketpeer_set_from_socket(so, so2); mac_socketpeer_set_from_socket(so2, so); #endif } else { unp_pcb_lock_pair(unp, unp2); } KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 && sotounpcb(so2) == unp2, ("%s: unp2 %p so2 %p", __func__, unp2, so2)); unp_connect2(so, so2, PRU_CONNECT); KASSERT((unp->unp_flags & UNP_CONNECTING) != 0, ("%s: unp %p has UNP_CONNECTING clear", __func__, unp)); unp->unp_flags &= ~UNP_CONNECTING; if (!return_locked) unp_pcb_unlock_pair(unp, unp2); bad2: mtx_unlock(vplock); bad: if (vp != NULL) { /* * If we are returning locked (called via uipc_sosend_dgram()), * we need to be sure that vput() won't sleep. This is * guaranteed by VOP_UNP_CONNECT() call above and unp2 lock. * SOCK_STREAM/SEQPACKET can't request return_locked (yet). */ MPASS(!(return_locked && connreq)); vput(vp); } free(sa, M_SONAME); if (__predict_false(error)) { UNP_PCB_LOCK(unp); KASSERT((unp->unp_flags & UNP_CONNECTING) != 0, ("%s: unp %p has UNP_CONNECTING clear", __func__, unp)); unp->unp_flags &= ~UNP_CONNECTING; UNP_PCB_UNLOCK(unp); } return (error); } /* * Set socket peer credentials at connection time. * * The client's PCB credentials are copied from its process structure. The * server's PCB credentials are copied from the socket on which it called * listen(2). uipc_listen cached that process's credentials at the time. 
*/ void unp_copy_peercred(struct thread *td, struct unpcb *client_unp, struct unpcb *server_unp, struct unpcb *listen_unp) { cru2xt(td, &client_unp->unp_peercred); client_unp->unp_flags |= UNP_HAVEPC; memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred, sizeof(server_unp->unp_peercred)); server_unp->unp_flags |= UNP_HAVEPC; client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK); } static void unp_connect2(struct socket *so, struct socket *so2, conn2_how req) { struct unpcb *unp; struct unpcb *unp2; MPASS(so2->so_type == so->so_type); unp = sotounpcb(so); KASSERT(unp != NULL, ("unp_connect2: unp == NULL")); unp2 = sotounpcb(so2); KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL")); UNP_PCB_LOCK_ASSERT(unp); UNP_PCB_LOCK_ASSERT(unp2); KASSERT(unp->unp_conn == NULL, ("%s: socket %p is already connected", __func__, unp)); unp->unp_conn = unp2; unp_pcb_hold(unp2); unp_pcb_hold(unp); switch (so->so_type) { case SOCK_DGRAM: UNP_REF_LIST_LOCK(); LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink); UNP_REF_LIST_UNLOCK(); soisconnected(so); break; case SOCK_STREAM: case SOCK_SEQPACKET: KASSERT(unp2->unp_conn == NULL, ("%s: socket %p is already connected", __func__, unp2)); unp2->unp_conn = unp; if (req == PRU_CONNECT && ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT)) soisconnecting(so); else soisconnected(so); soisconnected(so2); break; default: panic("unp_connect2"); } } static void unp_disconnect(struct unpcb *unp, struct unpcb *unp2) { struct socket *so, *so2; struct mbuf *m = NULL; #ifdef INVARIANTS struct unpcb *unptmp; #endif UNP_PCB_LOCK_ASSERT(unp); UNP_PCB_LOCK_ASSERT(unp2); KASSERT(unp->unp_conn == unp2, ("%s: unpcb %p is not connected to %p", __func__, unp, unp2)); unp->unp_conn = NULL; so = unp->unp_socket; so2 = unp2->unp_socket; switch (unp->unp_socket->so_type) { case SOCK_DGRAM: /* * Remove our send socket buffer from the peer's receive buffer. * Move the data to the receive buffer only if it is empty. * This is a protection against a scenario where a peer * connects, floods and disconnects, effectively blocking * sendto() from unconnected sockets. */ SOCK_RECVBUF_LOCK(so2); if (!STAILQ_EMPTY(&so->so_snd.uxdg_mb)) { TAILQ_REMOVE(&so2->so_rcv.uxdg_conns, &so->so_snd, uxdg_clist); if (__predict_true((so2->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) && STAILQ_EMPTY(&so2->so_rcv.uxdg_mb)) { STAILQ_CONCAT(&so2->so_rcv.uxdg_mb, &so->so_snd.uxdg_mb); so2->so_rcv.uxdg_cc += so->so_snd.uxdg_cc; so2->so_rcv.uxdg_ctl += so->so_snd.uxdg_ctl; so2->so_rcv.uxdg_mbcnt += so->so_snd.uxdg_mbcnt; } else { m = STAILQ_FIRST(&so->so_snd.uxdg_mb); STAILQ_INIT(&so->so_snd.uxdg_mb); so2->so_rcv.sb_acc -= so->so_snd.uxdg_cc; so2->so_rcv.sb_ccc -= so->so_snd.uxdg_cc; so2->so_rcv.sb_ctl -= so->so_snd.uxdg_ctl; so2->so_rcv.sb_mbcnt -= so->so_snd.uxdg_mbcnt; } /* Note: so may reconnect. 
*/ so->so_snd.uxdg_cc = 0; so->so_snd.uxdg_ctl = 0; so->so_snd.uxdg_mbcnt = 0; } SOCK_RECVBUF_UNLOCK(so2); UNP_REF_LIST_LOCK(); #ifdef INVARIANTS LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) { if (unptmp == unp) break; } KASSERT(unptmp != NULL, ("%s: %p not found in reflist of %p", __func__, unp, unp2)); #endif LIST_REMOVE(unp, unp_reflink); UNP_REF_LIST_UNLOCK(); if (so) { SOCK_LOCK(so); so->so_state &= ~SS_ISCONNECTED; SOCK_UNLOCK(so); } break; case SOCK_STREAM: case SOCK_SEQPACKET: if (so) soisdisconnected(so); MPASS(unp2->unp_conn == unp); unp2->unp_conn = NULL; if (so2) soisdisconnected(so2); break; } if (unp == unp2) { unp_pcb_rele_notlast(unp); if (!unp_pcb_rele(unp)) UNP_PCB_UNLOCK(unp); } else { if (!unp_pcb_rele(unp)) UNP_PCB_UNLOCK(unp); if (!unp_pcb_rele(unp2)) UNP_PCB_UNLOCK(unp2); } if (m != NULL) { unp_scan(m, unp_freerights); m_freem(m); } } /* * unp_pcblist() walks the global list of struct unpcb's to generate a * pointer list, bumping the refcount on each unpcb. It then copies them out * sequentially, validating the generation number on each to see if it has * been detached. All of this is necessary because copyout() may sleep on * disk I/O. */ static int unp_pcblist(SYSCTL_HANDLER_ARGS) { struct unpcb *unp, **unp_list; unp_gen_t gencnt; struct xunpgen *xug; struct unp_head *head; struct xunpcb *xu; u_int i; int error, n; switch ((intptr_t)arg1) { case SOCK_STREAM: head = &unp_shead; break; case SOCK_DGRAM: head = &unp_dhead; break; case SOCK_SEQPACKET: head = &unp_sphead; break; default: panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1); } /* * The process of preparing the PCB list is too time-consuming and * resource-intensive to repeat twice on every request. */ if (req->oldptr == NULL) { n = unp_count; req->oldidx = 2 * (sizeof *xug) + (n + n/8) * sizeof(struct xunpcb); return (0); } if (req->newptr != NULL) return (EPERM); /* * OK, now we're committed to doing something. */ xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO); UNP_LINK_RLOCK(); gencnt = unp_gencnt; n = unp_count; UNP_LINK_RUNLOCK(); xug->xug_len = sizeof *xug; xug->xug_count = n; xug->xug_gen = gencnt; xug->xug_sogen = so_gencnt; error = SYSCTL_OUT(req, xug, sizeof *xug); if (error) { free(xug, M_TEMP); return (error); } unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK); UNP_LINK_RLOCK(); for (unp = LIST_FIRST(head), i = 0; unp && i < n; unp = LIST_NEXT(unp, unp_link)) { UNP_PCB_LOCK(unp); if (unp->unp_gencnt <= gencnt) { if (cr_cansee(req->td->td_ucred, unp->unp_socket->so_cred)) { UNP_PCB_UNLOCK(unp); continue; } unp_list[i++] = unp; unp_pcb_hold(unp); } UNP_PCB_UNLOCK(unp); } UNP_LINK_RUNLOCK(); n = i; /* In case we lost some during malloc. */ error = 0; xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO); for (i = 0; i < n; i++) { unp = unp_list[i]; UNP_PCB_LOCK(unp); if (unp_pcb_rele(unp)) continue; if (unp->unp_gencnt <= gencnt) { xu->xu_len = sizeof *xu; xu->xu_unpp = (uintptr_t)unp; /* * XXX - need more locking here to protect against * connect/disconnect races for SMP. 
*/ if (unp->unp_addr != NULL) bcopy(unp->unp_addr, &xu->xu_addr, unp->unp_addr->sun_len); else bzero(&xu->xu_addr, sizeof(xu->xu_addr)); if (unp->unp_conn != NULL && unp->unp_conn->unp_addr != NULL) bcopy(unp->unp_conn->unp_addr, &xu->xu_caddr, unp->unp_conn->unp_addr->sun_len); else bzero(&xu->xu_caddr, sizeof(xu->xu_caddr)); xu->unp_vnode = (uintptr_t)unp->unp_vnode; xu->unp_conn = (uintptr_t)unp->unp_conn; xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs); xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink); xu->unp_gencnt = unp->unp_gencnt; sotoxsocket(unp->unp_socket, &xu->xu_socket); UNP_PCB_UNLOCK(unp); error = SYSCTL_OUT(req, xu, sizeof *xu); } else { UNP_PCB_UNLOCK(unp); } } free(xu, M_TEMP); if (!error) { /* * Give the user an updated idea of our state. If the * generation differs from what we told her before, she knows * that something happened while we were processing this * request, and it might be necessary to retry. */ xug->xug_gen = unp_gencnt; xug->xug_sogen = so_gencnt; xug->xug_count = unp_count; error = SYSCTL_OUT(req, xug, sizeof *xug); } free(unp_list, M_TEMP); free(xug, M_TEMP); return (error); } SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb", "List of active local datagram sockets"); SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", "List of active local stream sockets"); SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb", "List of active local seqpacket sockets"); static void unp_shutdown(struct unpcb *unp) { struct unpcb *unp2; struct socket *so; UNP_PCB_LOCK_ASSERT(unp); unp2 = unp->unp_conn; if ((unp->unp_socket->so_type == SOCK_STREAM || (unp->unp_socket->so_type == SOCK_SEQPACKET)) && unp2 != NULL) { so = unp2->unp_socket; if (so != NULL) socantrcvmore(so); } } static void unp_drop(struct unpcb *unp) { struct socket *so; struct unpcb *unp2; /* * Regardless of whether the socket's peer dropped the connection * with this socket by aborting or disconnecting, POSIX requires * that ECONNRESET is returned. */ UNP_PCB_LOCK(unp); so = unp->unp_socket; if (so) so->so_error = ECONNRESET; if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) { /* Last reference dropped in unp_disconnect(). 
*/ unp_pcb_rele_notlast(unp); unp_disconnect(unp, unp2); } else if (!unp_pcb_rele(unp)) { UNP_PCB_UNLOCK(unp); } } static void unp_freerights(struct filedescent **fdep, int fdcount) { struct file *fp; int i; KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount)); for (i = 0; i < fdcount; i++) { fp = fdep[i]->fde_file; filecaps_free(&fdep[i]->fde_caps); unp_discard(fp); } free(fdep[0], M_FILECAPS); } static int unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags) { struct thread *td = curthread; /* XXX */ struct cmsghdr *cm = mtod(control, struct cmsghdr *); int i; int *fdp; struct filedesc *fdesc = td->td_proc->p_fd; struct filedescent **fdep; void *data; socklen_t clen = control->m_len, datalen; int error, newfds; u_int newlen; UNP_LINK_UNLOCK_ASSERT(); error = 0; if (controlp != NULL) /* controlp == NULL => free control messages */ *controlp = NULL; while (cm != NULL) { MPASS(clen >= sizeof(*cm) && clen >= cm->cmsg_len); data = CMSG_DATA(cm); datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) { newfds = datalen / sizeof(*fdep); if (newfds == 0) goto next; fdep = data; /* If we're not outputting the descriptors free them. */ if (error || controlp == NULL) { unp_freerights(fdep, newfds); goto next; } FILEDESC_XLOCK(fdesc); /* * Now change each pointer to an fd in the global * table to an integer that is the index to the local * fd table entry that we set up to point to the * global one we are transferring. */ newlen = newfds * sizeof(int); *controlp = sbcreatecontrol(NULL, newlen, SCM_RIGHTS, SOL_SOCKET, M_WAITOK); fdp = (int *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); if ((error = fdallocn(td, 0, fdp, newfds))) { FILEDESC_XUNLOCK(fdesc); unp_freerights(fdep, newfds); m_freem(*controlp); *controlp = NULL; goto next; } for (i = 0; i < newfds; i++, fdp++) { _finstall(fdesc, fdep[i]->fde_file, *fdp, (flags & MSG_CMSG_CLOEXEC) != 0 ? O_CLOEXEC : 0, &fdep[i]->fde_caps); unp_externalize_fp(fdep[i]->fde_file); } /* * The new type indicates that the mbuf data refers to * kernel resources that may need to be released before * the mbuf is freed. */ m_chtype(*controlp, MT_EXTCONTROL); FILEDESC_XUNLOCK(fdesc); free(fdep[0], M_FILECAPS); } else { /* We can just copy anything else across. 
*/ if (error || controlp == NULL) goto next; *controlp = sbcreatecontrol(NULL, datalen, cm->cmsg_type, cm->cmsg_level, M_WAITOK); bcopy(data, CMSG_DATA(mtod(*controlp, struct cmsghdr *)), datalen); } controlp = &(*controlp)->m_next; next: if (CMSG_SPACE(datalen) < clen) { clen -= CMSG_SPACE(datalen); cm = (struct cmsghdr *) ((caddr_t)cm + CMSG_SPACE(datalen)); } else { clen = 0; cm = NULL; } } m_freem(control); return (error); } static void unp_zone_change(void *tag) { uma_zone_set_max(unp_zone, maxsockets); } #ifdef INVARIANTS static void unp_zdtor(void *mem, int size __unused, void *arg __unused) { struct unpcb *unp; unp = mem; KASSERT(LIST_EMPTY(&unp->unp_refs), ("%s: unpcb %p has lingering refs", __func__, unp)); KASSERT(unp->unp_socket == NULL, ("%s: unpcb %p has socket backpointer", __func__, unp)); KASSERT(unp->unp_vnode == NULL, ("%s: unpcb %p has vnode references", __func__, unp)); KASSERT(unp->unp_conn == NULL, ("%s: unpcb %p is still connected", __func__, unp)); KASSERT(unp->unp_addr == NULL, ("%s: unpcb %p has leaked addr", __func__, unp)); } #endif static void unp_init(void *arg __unused) { uma_dtor dtor; #ifdef INVARIANTS dtor = unp_zdtor; #else dtor = NULL; #endif unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor, NULL, NULL, UMA_ALIGN_CACHE, 0); uma_zone_set_max(unp_zone, maxsockets); uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached"); EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change, NULL, EVENTHANDLER_PRI_ANY); LIST_INIT(&unp_dhead); LIST_INIT(&unp_shead); LIST_INIT(&unp_sphead); SLIST_INIT(&unp_defers); TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL); TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL); UNP_LINK_LOCK_INIT(); UNP_DEFERRED_LOCK_INIT(); } SYSINIT(unp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, unp_init, NULL); static void unp_internalize_cleanup_rights(struct mbuf *control) { struct cmsghdr *cp; struct mbuf *m; void *data; socklen_t datalen; for (m = control; m != NULL; m = m->m_next) { cp = mtod(m, struct cmsghdr *); if (cp->cmsg_level != SOL_SOCKET || cp->cmsg_type != SCM_RIGHTS) continue; data = CMSG_DATA(cp); datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data; unp_freerights(data, datalen / sizeof(struct filedesc *)); } } static int unp_internalize(struct mbuf **controlp, struct thread *td, struct mbuf **clast, u_int *space, u_int *mbcnt) { struct mbuf *control, **initial_controlp; struct proc *p; struct filedesc *fdesc; struct bintime *bt; struct cmsghdr *cm; struct cmsgcred *cmcred; struct filedescent *fde, **fdep, *fdev; struct file *fp; struct timeval *tv; struct timespec *ts; void *data; socklen_t clen, datalen; int i, j, error, *fdp, oldfds; u_int newlen; MPASS((*controlp)->m_next == NULL); /* COMPAT_OLDSOCK may violate */ UNP_LINK_UNLOCK_ASSERT(); p = td->td_proc; fdesc = p->p_fd; error = 0; control = *controlp; *controlp = NULL; initial_controlp = controlp; for (clen = control->m_len, cm = mtod(control, struct cmsghdr *), data = CMSG_DATA(cm); clen >= sizeof(*cm) && cm->cmsg_level == SOL_SOCKET && clen >= cm->cmsg_len && cm->cmsg_len >= sizeof(*cm) && (char *)cm + cm->cmsg_len >= (char *)data; clen -= min(CMSG_SPACE(datalen), clen), cm = (struct cmsghdr *) ((char *)cm + CMSG_SPACE(datalen)), data = CMSG_DATA(cm)) { datalen = (char *)cm + cm->cmsg_len - (char *)data; switch (cm->cmsg_type) { case SCM_CREDS: *controlp = sbcreatecontrol(NULL, sizeof(*cmcred), SCM_CREDS, SOL_SOCKET, M_WAITOK); cmcred = (struct cmsgcred *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); cmcred->cmcred_pid 
= p->p_pid; cmcred->cmcred_uid = td->td_ucred->cr_ruid; cmcred->cmcred_gid = td->td_ucred->cr_rgid; cmcred->cmcred_euid = td->td_ucred->cr_uid; cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX); for (i = 0; i < cmcred->cmcred_ngroups; i++) cmcred->cmcred_groups[i] = td->td_ucred->cr_groups[i]; break; case SCM_RIGHTS: oldfds = datalen / sizeof (int); if (oldfds == 0) continue; /* On some machines sizeof pointer is bigger than * sizeof int, so we need to check if data fits into * single mbuf. We could allocate several mbufs, and * unp_externalize() should even properly handle that. * But it is not worth to complicate the code for an * insane scenario of passing over 200 file descriptors * at once. */ newlen = oldfds * sizeof(fdep[0]); if (CMSG_SPACE(newlen) > MCLBYTES) { error = EMSGSIZE; goto out; } /* * Check that all the FDs passed in refer to legal * files. If not, reject the entire operation. */ fdp = data; FILEDESC_SLOCK(fdesc); for (i = 0; i < oldfds; i++, fdp++) { fp = fget_noref(fdesc, *fdp); if (fp == NULL) { FILEDESC_SUNLOCK(fdesc); error = EBADF; goto out; } if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) { FILEDESC_SUNLOCK(fdesc); error = EOPNOTSUPP; goto out; } } /* * Now replace the integer FDs with pointers to the * file structure and capability rights. */ *controlp = sbcreatecontrol(NULL, newlen, SCM_RIGHTS, SOL_SOCKET, M_WAITOK); fdp = data; for (i = 0; i < oldfds; i++, fdp++) { if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) { fdp = data; for (j = 0; j < i; j++, fdp++) { fdrop(fdesc->fd_ofiles[*fdp]. fde_file, td); } FILEDESC_SUNLOCK(fdesc); error = EBADF; goto out; } } fdp = data; fdep = (struct filedescent **) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS, M_WAITOK); for (i = 0; i < oldfds; i++, fdev++, fdp++) { fde = &fdesc->fd_ofiles[*fdp]; fdep[i] = fdev; fdep[i]->fde_file = fde->fde_file; filecaps_copy(&fde->fde_caps, &fdep[i]->fde_caps, true); unp_internalize_fp(fdep[i]->fde_file); } FILEDESC_SUNLOCK(fdesc); break; case SCM_TIMESTAMP: *controlp = sbcreatecontrol(NULL, sizeof(*tv), SCM_TIMESTAMP, SOL_SOCKET, M_WAITOK); tv = (struct timeval *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); microtime(tv); break; case SCM_BINTIME: *controlp = sbcreatecontrol(NULL, sizeof(*bt), SCM_BINTIME, SOL_SOCKET, M_WAITOK); bt = (struct bintime *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); bintime(bt); break; case SCM_REALTIME: *controlp = sbcreatecontrol(NULL, sizeof(*ts), SCM_REALTIME, SOL_SOCKET, M_WAITOK); ts = (struct timespec *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); nanotime(ts); break; case SCM_MONOTONIC: *controlp = sbcreatecontrol(NULL, sizeof(*ts), SCM_MONOTONIC, SOL_SOCKET, M_WAITOK); ts = (struct timespec *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); nanouptime(ts); break; default: error = EINVAL; goto out; } if (space != NULL) { *space += (*controlp)->m_len; *mbcnt += MSIZE; if ((*controlp)->m_flags & M_EXT) *mbcnt += (*controlp)->m_ext.ext_size; *clast = *controlp; } controlp = &(*controlp)->m_next; } if (clen > 0) error = EINVAL; out: if (error != 0 && initial_controlp != NULL) unp_internalize_cleanup_rights(*initial_controlp); m_freem(control); return (error); } static struct mbuf * unp_addsockcred(struct thread *td, struct mbuf *control, int mode, struct mbuf **clast, u_int *space, u_int *mbcnt) { struct mbuf *m, *n, *n_prev; const struct cmsghdr *cm; int ngroups, i, cmsgtype; size_t ctrlsz; ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX); if (mode & UNP_WANTCRED_ALWAYS) { ctrlsz = 
SOCKCRED2SIZE(ngroups); cmsgtype = SCM_CREDS2; } else { ctrlsz = SOCKCREDSIZE(ngroups); cmsgtype = SCM_CREDS; } m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET, M_NOWAIT); if (m == NULL) return (control); MPASS((m->m_flags & M_EXT) == 0 && m->m_next == NULL); if (mode & UNP_WANTCRED_ALWAYS) { struct sockcred2 *sc; sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *)); sc->sc_version = 0; sc->sc_pid = td->td_proc->p_pid; sc->sc_uid = td->td_ucred->cr_ruid; sc->sc_euid = td->td_ucred->cr_uid; sc->sc_gid = td->td_ucred->cr_rgid; sc->sc_egid = td->td_ucred->cr_gid; sc->sc_ngroups = ngroups; for (i = 0; i < sc->sc_ngroups; i++) sc->sc_groups[i] = td->td_ucred->cr_groups[i]; } else { struct sockcred *sc; sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *)); sc->sc_uid = td->td_ucred->cr_ruid; sc->sc_euid = td->td_ucred->cr_uid; sc->sc_gid = td->td_ucred->cr_rgid; sc->sc_egid = td->td_ucred->cr_gid; sc->sc_ngroups = ngroups; for (i = 0; i < sc->sc_ngroups; i++) sc->sc_groups[i] = td->td_ucred->cr_groups[i]; } /* * Unlink SCM_CREDS control messages (struct cmsgcred), since just * created SCM_CREDS control message (struct sockcred) has another * format. */ if (control != NULL && cmsgtype == SCM_CREDS) for (n = control, n_prev = NULL; n != NULL;) { cm = mtod(n, struct cmsghdr *); if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_CREDS) { if (n_prev == NULL) control = n->m_next; else n_prev->m_next = n->m_next; if (space != NULL) { MPASS(*space >= n->m_len); *space -= n->m_len; MPASS(*mbcnt >= MSIZE); *mbcnt -= MSIZE; if (n->m_flags & M_EXT) { MPASS(*mbcnt >= n->m_ext.ext_size); *mbcnt -= n->m_ext.ext_size; } MPASS(clast); if (*clast == n) { MPASS(n->m_next == NULL); if (n_prev == NULL) *clast = m; else *clast = n_prev; } } n = m_free(n); } else { n_prev = n; n = n->m_next; } } /* Prepend it to the head. 
*/ m->m_next = control; if (space != NULL) { *space += m->m_len; *mbcnt += MSIZE; if (control == NULL) *clast = m; } return (m); } static struct unpcb * fptounp(struct file *fp) { struct socket *so; if (fp->f_type != DTYPE_SOCKET) return (NULL); if ((so = fp->f_data) == NULL) return (NULL); if (so->so_proto->pr_domain != &localdomain) return (NULL); return sotounpcb(so); } static void unp_discard(struct file *fp) { struct unp_defer *dr; if (unp_externalize_fp(fp)) { dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK); dr->ud_fp = fp; UNP_DEFERRED_LOCK(); SLIST_INSERT_HEAD(&unp_defers, dr, ud_link); UNP_DEFERRED_UNLOCK(); atomic_add_int(&unp_defers_count, 1); taskqueue_enqueue(taskqueue_thread, &unp_defer_task); } else closef_nothread(fp); } static void unp_process_defers(void *arg __unused, int pending) { struct unp_defer *dr; SLIST_HEAD(, unp_defer) drl; int count; SLIST_INIT(&drl); for (;;) { UNP_DEFERRED_LOCK(); if (SLIST_FIRST(&unp_defers) == NULL) { UNP_DEFERRED_UNLOCK(); break; } SLIST_SWAP(&unp_defers, &drl, unp_defer); UNP_DEFERRED_UNLOCK(); count = 0; while ((dr = SLIST_FIRST(&drl)) != NULL) { SLIST_REMOVE_HEAD(&drl, ud_link); closef_nothread(dr->ud_fp); free(dr, M_TEMP); count++; } atomic_add_int(&unp_defers_count, -count); } } static void unp_internalize_fp(struct file *fp) { struct unpcb *unp; UNP_LINK_WLOCK(); if ((unp = fptounp(fp)) != NULL) { unp->unp_file = fp; unp->unp_msgcount++; } unp_rights++; UNP_LINK_WUNLOCK(); } static int unp_externalize_fp(struct file *fp) { struct unpcb *unp; int ret; UNP_LINK_WLOCK(); if ((unp = fptounp(fp)) != NULL) { unp->unp_msgcount--; ret = 1; } else ret = 0; unp_rights--; UNP_LINK_WUNLOCK(); return (ret); } /* * unp_marked indicates whether additional work has been deferred for a future * pass through unp_gc(). It is thread local and does not require explicit * synchronization. */ static int unp_marked; static void unp_remove_dead_ref(struct filedescent **fdep, int fdcount) { struct unpcb *unp; struct file *fp; int i; /* * This function can only be called from the gc task. */ KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0, ("%s: not on gc callout", __func__)); UNP_LINK_LOCK_ASSERT(); for (i = 0; i < fdcount; i++) { fp = fdep[i]->fde_file; if ((unp = fptounp(fp)) == NULL) continue; if ((unp->unp_gcflag & UNPGC_DEAD) == 0) continue; unp->unp_gcrefs--; } } static void unp_restore_undead_ref(struct filedescent **fdep, int fdcount) { struct unpcb *unp; struct file *fp; int i; /* * This function can only be called from the gc task.
*/ KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0, ("%s: not on gc callout", __func__)); UNP_LINK_LOCK_ASSERT(); for (i = 0; i < fdcount; i++) { fp = fdep[i]->fde_file; if ((unp = fptounp(fp)) == NULL) continue; if ((unp->unp_gcflag & UNPGC_DEAD) == 0) continue; unp->unp_gcrefs++; unp_marked++; } } static void unp_scan_socket(struct socket *so, void (*op)(struct filedescent **, int)) { struct sockbuf *sb; SOCK_LOCK_ASSERT(so); if (sotounpcb(so)->unp_gcflag & UNPGC_IGNORE_RIGHTS) return; SOCK_RECVBUF_LOCK(so); switch (so->so_type) { case SOCK_DGRAM: unp_scan(STAILQ_FIRST(&so->so_rcv.uxdg_mb), op); unp_scan(so->so_rcv.uxdg_peeked, op); TAILQ_FOREACH(sb, &so->so_rcv.uxdg_conns, uxdg_clist) unp_scan(STAILQ_FIRST(&sb->uxdg_mb), op); break; case SOCK_STREAM: case SOCK_SEQPACKET: unp_scan(so->so_rcv.sb_mb, op); break; } SOCK_RECVBUF_UNLOCK(so); } static void unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int)) { struct socket *so, *soa; so = unp->unp_socket; SOCK_LOCK(so); if (SOLISTENING(so)) { /* * Mark all sockets in our accept queue. */ TAILQ_FOREACH(soa, &so->sol_comp, so_list) unp_scan_socket(soa, op); } else { /* * Mark all sockets we reference with RIGHTS. */ unp_scan_socket(so, op); } SOCK_UNLOCK(so); } static int unp_recycled; SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0, "Number of unreachable sockets claimed by the garbage collector."); static int unp_taskcount; SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0, "Number of times the garbage collector has run."); SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0, "Number of active local sockets."); static void unp_gc(__unused void *arg, int pending) { struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead, NULL }; struct unp_head **head; struct unp_head unp_deadhead; /* List of potentially-dead sockets. */ struct file *f, **unref; struct unpcb *unp, *unptmp; int i, total, unp_unreachable; LIST_INIT(&unp_deadhead); unp_taskcount++; UNP_LINK_RLOCK(); /* * First determine which sockets may be in cycles. */ unp_unreachable = 0; for (head = heads; *head != NULL; head++) LIST_FOREACH(unp, *head, unp_link) { KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0, ("%s: unp %p has unexpected gc flags 0x%x", __func__, unp, (unsigned int)unp->unp_gcflag)); f = unp->unp_file; /* * Check for an unreachable socket potentially in a * cycle. It must be in a queue as indicated by * msgcount, and this must equal the file reference * count. Note that when msgcount is 0 the file is * NULL. */ if (f != NULL && unp->unp_msgcount != 0 && refcount_load(&f->f_count) == unp->unp_msgcount) { LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead); unp->unp_gcflag |= UNPGC_DEAD; unp->unp_gcrefs = unp->unp_msgcount; unp_unreachable++; } } /* * Scan all sockets previously marked as potentially being in a cycle * and remove the references each socket holds on any UNPGC_DEAD * sockets in its queue. After this step, all remaining references on * sockets marked UNPGC_DEAD should not be part of any cycle. */ LIST_FOREACH(unp, &unp_deadhead, unp_dead) unp_gc_scan(unp, unp_remove_dead_ref); /* * If a socket still has a non-negative refcount, it cannot be in a * cycle. In this case increment refcount of all children iteratively. * Stop the scan once we do a complete loop without discovering * a new reachable socket. 
*/ do { unp_marked = 0; LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp) if (unp->unp_gcrefs > 0) { unp->unp_gcflag &= ~UNPGC_DEAD; LIST_REMOVE(unp, unp_dead); KASSERT(unp_unreachable > 0, ("%s: unp_unreachable underflow.", __func__)); unp_unreachable--; unp_gc_scan(unp, unp_restore_undead_ref); } } while (unp_marked); UNP_LINK_RUNLOCK(); if (unp_unreachable == 0) return; /* * Allocate space for a local array of dead unpcbs. * TODO: can this path be simplified by instead using the local * dead list at unp_deadhead, after taking out references * on the file object and/or unpcb and dropping the link lock? */ unref = malloc(unp_unreachable * sizeof(struct file *), M_TEMP, M_WAITOK); /* * Iterate looking for sockets which have been specifically marked * as unreachable and store them locally. */ UNP_LINK_RLOCK(); total = 0; LIST_FOREACH(unp, &unp_deadhead, unp_dead) { KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0, ("%s: unp %p not marked UNPGC_DEAD", __func__, unp)); unp->unp_gcflag &= ~UNPGC_DEAD; f = unp->unp_file; if (unp->unp_msgcount == 0 || f == NULL || refcount_load(&f->f_count) != unp->unp_msgcount || !fhold(f)) continue; unref[total++] = f; KASSERT(total <= unp_unreachable, ("%s: incorrect unreachable count.", __func__)); } UNP_LINK_RUNLOCK(); /* * Now flush all sockets, free'ing rights. This will free the * struct files associated with these sockets but leave each socket * with one remaining ref. */ for (i = 0; i < total; i++) { struct socket *so; so = unref[i]->f_data; CURVNET_SET(so->so_vnet); sorflush(so); CURVNET_RESTORE(); } /* * And finally release the sockets so they can be reclaimed. */ for (i = 0; i < total; i++) fdrop(unref[i], NULL); unp_recycled += total; free(unref, M_TEMP); } /* * Synchronize against unp_gc, which can trip over data as we are freeing it. */ static void unp_dispose(struct socket *so) { struct sockbuf *sb; struct unpcb *unp; struct mbuf *m; MPASS(!SOLISTENING(so)); unp = sotounpcb(so); UNP_LINK_WLOCK(); unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS; UNP_LINK_WUNLOCK(); /* * Grab our special mbufs before calling sbrelease(). */ SOCK_RECVBUF_LOCK(so); switch (so->so_type) { case SOCK_DGRAM: while ((sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) != NULL) { STAILQ_CONCAT(&so->so_rcv.uxdg_mb, &sb->uxdg_mb); TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist); /* Note: socket of sb may reconnect. */ sb->uxdg_cc = sb->uxdg_ctl = sb->uxdg_mbcnt = 0; } sb = &so->so_rcv; if (sb->uxdg_peeked != NULL) { STAILQ_INSERT_HEAD(&sb->uxdg_mb, sb->uxdg_peeked, m_stailqpkt); sb->uxdg_peeked = NULL; } m = STAILQ_FIRST(&sb->uxdg_mb); STAILQ_INIT(&sb->uxdg_mb); /* XXX: our shortened sbrelease() */ (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0, RLIM_INFINITY); /* * XXXGL Mark sb with SBS_CANTRCVMORE. This is needed to * prevent uipc_sosend_dgram() or unp_disconnect() adding more * data to the socket. * We are now in dom_dispose and it could be a call from * soshutdown() or from the final sofree(). The sofree() case * is simple as it guarantees that no more sends will happen, * however we can race with unp_disconnect() from our peer. * The shutdown(2) case is more exotic. It would call into * dom_dispose() only if socket is SS_ISCONNECTED. This is * possible if we did connect(2) on this socket and we also * had it bound with bind(2) and receive connections from other * sockets. Because soshutdown() violates POSIX (see comment * there) we will end up here shutting down our receive side. 
* Of course this will have an effect not only on the peer we * connect(2)ed to, but also on all of the peers who had * connect(2)ed to us. Their sends would end up with ENOBUFS. */ sb->sb_state |= SBS_CANTRCVMORE; break; case SOCK_STREAM: case SOCK_SEQPACKET: sb = &so->so_rcv; m = sbcut_locked(sb, sb->sb_ccc); KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0, ("%s: ccc %u mb %p mbcnt %u", __func__, sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt)); sbrelease_locked(so, SO_RCV); break; } SOCK_RECVBUF_UNLOCK(so); if (SOCK_IO_RECV_OWNED(so)) SOCK_IO_RECV_UNLOCK(so); if (m != NULL) { unp_scan(m, unp_freerights); m_freem(m); } } static void unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int)) { struct mbuf *m; struct cmsghdr *cm; void *data; socklen_t clen, datalen; while (m0 != NULL) { for (m = m0; m; m = m->m_next) { if (m->m_type != MT_CONTROL) continue; cm = mtod(m, struct cmsghdr *); clen = m->m_len; while (cm != NULL) { if (sizeof(*cm) > clen || cm->cmsg_len > clen) break; data = CMSG_DATA(cm); datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) { (*op)(data, datalen / sizeof(struct filedescent *)); } if (CMSG_SPACE(datalen) < clen) { clen -= CMSG_SPACE(datalen); cm = (struct cmsghdr *) ((caddr_t)cm + CMSG_SPACE(datalen)); } else { clen = 0; cm = NULL; } } } m0 = m0->m_nextpkt; } } /* * Definitions of protocols supported in the LOCAL domain. */ static struct protosw streamproto = { .pr_type = SOCK_STREAM, .pr_flags = PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS| PR_CAPATTACH, .pr_ctloutput = &uipc_ctloutput, .pr_abort = uipc_abort, - .pr_accept = uipc_accept, + .pr_accept = uipc_peeraddr, .pr_attach = uipc_attach, .pr_bind = uipc_bind, .pr_bindat = uipc_bindat, .pr_connect = uipc_connect, .pr_connectat = uipc_connectat, .pr_connect2 = uipc_connect2, .pr_detach = uipc_detach, .pr_disconnect = uipc_disconnect, .pr_listen = uipc_listen, .pr_peeraddr = uipc_peeraddr, .pr_rcvd = uipc_rcvd, .pr_send = uipc_send, .pr_ready = uipc_ready, .pr_sense = uipc_sense, .pr_shutdown = uipc_shutdown, .pr_sockaddr = uipc_sockaddr, .pr_soreceive = soreceive_generic, .pr_close = uipc_close, }; static struct protosw dgramproto = { .pr_type = SOCK_DGRAM, .pr_flags = PR_ATOMIC | PR_ADDR |PR_RIGHTS | PR_CAPATTACH | PR_SOCKBUF, .pr_ctloutput = &uipc_ctloutput, .pr_abort = uipc_abort, - .pr_accept = uipc_accept, + .pr_accept = uipc_peeraddr, .pr_attach = uipc_attach, .pr_bind = uipc_bind, .pr_bindat = uipc_bindat, .pr_connect = uipc_connect, .pr_connectat = uipc_connectat, .pr_connect2 = uipc_connect2, .pr_detach = uipc_detach, .pr_disconnect = uipc_disconnect, .pr_peeraddr = uipc_peeraddr, .pr_sosend = uipc_sosend_dgram, .pr_sense = uipc_sense, .pr_shutdown = uipc_shutdown, .pr_sockaddr = uipc_sockaddr, .pr_soreceive = uipc_soreceive_dgram, .pr_close = uipc_close, }; static struct protosw seqpacketproto = { .pr_type = SOCK_SEQPACKET, /* * XXXRW: For now, PR_ADDR because soreceive will bump into them * due to our use of sbappendaddr. A new sbappend variant is needed * that supports both atomic record writes and control data.
*/ .pr_flags = PR_ADDR|PR_ATOMIC|PR_CONNREQUIRED| PR_WANTRCVD|PR_RIGHTS|PR_CAPATTACH, .pr_ctloutput = &uipc_ctloutput, .pr_abort = uipc_abort, - .pr_accept = uipc_accept, + .pr_accept = uipc_peeraddr, .pr_attach = uipc_attach, .pr_bind = uipc_bind, .pr_bindat = uipc_bindat, .pr_connect = uipc_connect, .pr_connectat = uipc_connectat, .pr_connect2 = uipc_connect2, .pr_detach = uipc_detach, .pr_disconnect = uipc_disconnect, .pr_listen = uipc_listen, .pr_peeraddr = uipc_peeraddr, .pr_rcvd = uipc_rcvd, .pr_send = uipc_send, .pr_sense = uipc_sense, .pr_shutdown = uipc_shutdown, .pr_sockaddr = uipc_sockaddr, .pr_soreceive = soreceive_generic, /* XXX: or...? */ .pr_close = uipc_close, }; static struct domain localdomain = { .dom_family = AF_LOCAL, .dom_name = "local", .dom_externalize = unp_externalize, .dom_dispose = unp_dispose, .dom_nprotosw = 3, .dom_protosw = { &streamproto, &dgramproto, &seqpacketproto, } }; DOMAIN_SET(local); /* * A helper function called by VFS before socket-type vnode reclamation. * For an active vnode it clears unp_vnode pointer and decrements unp_vnode * use count. */ void vfs_unp_reclaim(struct vnode *vp) { struct unpcb *unp; int active; struct mtx *vplock; ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim"); KASSERT(vp->v_type == VSOCK, ("vfs_unp_reclaim: vp->v_type != VSOCK")); active = 0; vplock = mtx_pool_find(mtxpool_sleep, vp); mtx_lock(vplock); VOP_UNP_CONNECT(vp, &unp); if (unp == NULL) goto done; UNP_PCB_LOCK(unp); if (unp->unp_vnode == vp) { VOP_UNP_DETACH(vp); unp->unp_vnode = NULL; active = 1; } UNP_PCB_UNLOCK(unp); done: mtx_unlock(vplock); if (active) vunref(vp); } #ifdef DDB static void db_print_indent(int indent) { int i; for (i = 0; i < indent; i++) db_printf(" "); } static void db_print_unpflags(int unp_flags) { int comma; comma = 0; if (unp_flags & UNP_HAVEPC) { db_printf("%sUNP_HAVEPC", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_WANTCRED_ALWAYS) { db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_WANTCRED_ONESHOT) { db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_CONNWAIT) { db_printf("%sUNP_CONNWAIT", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_CONNECTING) { db_printf("%sUNP_CONNECTING", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_BINDING) { db_printf("%sUNP_BINDING", comma ? ", " : ""); comma = 1; } } static void db_print_xucred(int indent, struct xucred *xu) { int comma, i; db_print_indent(indent); db_printf("cr_version: %u cr_uid: %u cr_pid: %d cr_ngroups: %d\n", xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups); db_print_indent(indent); db_printf("cr_groups: "); comma = 0; for (i = 0; i < xu->cr_ngroups; i++) { db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]); comma = 1; } db_printf("\n"); } static void db_print_unprefs(int indent, struct unp_head *uh) { struct unpcb *unp; int counter; counter = 0; LIST_FOREACH(unp, uh, unp_reflink) { if (counter % 4 == 0) db_print_indent(indent); db_printf("%p ", unp); if (counter % 4 == 3) db_printf("\n"); counter++; } if (counter != 0 && counter % 4 != 0) db_printf("\n"); } DB_SHOW_COMMAND(unpcb, db_show_unpcb) { struct unpcb *unp; if (!have_addr) { db_printf("usage: show unpcb \n"); return; } unp = (struct unpcb *)addr; db_printf("unp_socket: %p unp_vnode: %p\n", unp->unp_socket, unp->unp_vnode); db_printf("unp_ino: %ju unp_conn: %p\n", (uintmax_t)unp->unp_ino, unp->unp_conn); db_printf("unp_refs:\n"); db_print_unprefs(2, &unp->unp_refs); /* XXXRW: Would be nice to print the full address, if any. 
*/ db_printf("unp_addr: %p\n", unp->unp_addr); db_printf("unp_gencnt: %llu\n", (unsigned long long)unp->unp_gencnt); db_printf("unp_flags: %x (", unp->unp_flags); db_print_unpflags(unp->unp_flags); db_printf(")\n"); db_printf("unp_peercred:\n"); db_print_xucred(2, &unp->unp_peercred); db_printf("unp_refcount: %u\n", unp->unp_refcount); } #endif diff --git a/sys/net/if_ovpn.c b/sys/net/if_ovpn.c index 1b5d419fe58b..fb9596069101 100644 --- a/sys/net/if_ovpn.c +++ b/sys/net/if_ovpn.c @@ -1,2579 +1,2575 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021-2022 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "if_ovpn.h" struct ovpn_kkey_dir { int refcount; uint8_t key[32]; uint8_t keylen; uint8_t nonce[8]; uint8_t noncelen; enum ovpn_key_cipher cipher; crypto_session_t cryptoid; struct mtx replay_mtx; /* * Last seen gapless sequence number. New rx seq numbers must be * strictly higher than this. */ uint32_t rx_seq; uint64_t tx_seq; /* Seen packets, relative to rx_seq. bit(0) will always be 0. 
*/ uint64_t rx_window; }; struct ovpn_kkey { struct ovpn_kkey_dir *encrypt; struct ovpn_kkey_dir *decrypt; uint8_t keyid; uint32_t peerid; }; struct ovpn_keepalive { uint32_t interval; uint32_t timeout; }; struct ovpn_wire_header { uint32_t opcode; /* opcode, key id, peer id */ uint32_t seq; uint8_t auth_tag[16]; }; struct ovpn_peer_counters { uint64_t pkt_in; uint64_t pkt_out; uint64_t bytes_in; uint64_t bytes_out; }; #define OVPN_PEER_COUNTER_SIZE (sizeof(struct ovpn_peer_counters)/sizeof(uint64_t)) struct ovpn_notification { enum ovpn_notif_type type; uint32_t peerid; /* Delete notification */ enum ovpn_del_reason del_reason; struct ovpn_peer_counters counters; }; struct ovpn_softc; struct ovpn_kpeer { RB_ENTRY(ovpn_kpeer) tree; int refcount; uint32_t peerid; struct ovpn_softc *sc; struct sockaddr_storage local; struct sockaddr_storage remote; struct in_addr vpn4; struct in6_addr vpn6; struct ovpn_kkey keys[2]; enum ovpn_del_reason del_reason; struct ovpn_keepalive keepalive; uint32_t *last_active; struct callout ping_send; struct callout ping_rcv; counter_u64_t counters[OVPN_PEER_COUNTER_SIZE]; }; struct ovpn_counters { uint64_t lost_ctrl_pkts_in; uint64_t lost_ctrl_pkts_out; uint64_t lost_data_pkts_in; uint64_t lost_data_pkts_out; uint64_t nomem_data_pkts_in; uint64_t nomem_data_pkts_out; uint64_t received_ctrl_pkts; uint64_t received_data_pkts; uint64_t sent_ctrl_pkts; uint64_t sent_data_pkts; uint64_t transport_bytes_sent; uint64_t transport_bytes_received; uint64_t tunnel_bytes_sent; uint64_t tunnel_bytes_received; }; #define OVPN_COUNTER_SIZE (sizeof(struct ovpn_counters)/sizeof(uint64_t)) RB_HEAD(ovpn_kpeers, ovpn_kpeer); struct ovpn_softc { int refcount; struct rmlock lock; struct ifnet *ifp; struct socket *so; int peercount; struct ovpn_kpeers peers; /* Pending notification */ struct buf_ring *notifring; counter_u64_t counters[OVPN_COUNTER_SIZE]; struct epoch_context epoch_ctx; }; static struct ovpn_kpeer *ovpn_find_peer(struct ovpn_softc *, uint32_t); static bool ovpn_udp_input(struct mbuf *, int, struct inpcb *, const struct sockaddr *, void *); static int ovpn_transmit_to_peer(struct ifnet *, struct mbuf *, struct ovpn_kpeer *, struct rm_priotracker *); static int ovpn_encap(struct ovpn_softc *, uint32_t, struct mbuf *); static int ovpn_get_af(struct mbuf *); static void ovpn_free_kkey_dir(struct ovpn_kkey_dir *); static bool ovpn_check_replay(struct ovpn_kkey_dir *, uint32_t); static int ovpn_peer_compare(struct ovpn_kpeer *, struct ovpn_kpeer *); static RB_PROTOTYPE(ovpn_kpeers, ovpn_kpeer, tree, ovpn_peer_compare); static RB_GENERATE(ovpn_kpeers, ovpn_kpeer, tree, ovpn_peer_compare); #define OVPN_MTU_MIN 576 #define OVPN_MTU_MAX (IP_MAXPACKET - sizeof(struct ip) - \ sizeof(struct udphdr) - sizeof(struct ovpn_wire_header)) #define OVPN_OP_DATA_V2 0x09 #define OVPN_OP_SHIFT 3 #define OVPN_SEQ_ROTATE 0x80000000 VNET_DEFINE_STATIC(struct if_clone *, ovpn_cloner); #define V_ovpn_cloner VNET(ovpn_cloner) #define OVPN_RLOCK_TRACKER struct rm_priotracker _ovpn_lock_tracker; \ struct rm_priotracker *_ovpn_lock_trackerp = &_ovpn_lock_tracker #define OVPN_RLOCK(sc) rm_rlock(&(sc)->lock, _ovpn_lock_trackerp) #define OVPN_RUNLOCK(sc) rm_runlock(&(sc)->lock, _ovpn_lock_trackerp) #define OVPN_WLOCK(sc) rm_wlock(&(sc)->lock) #define OVPN_WUNLOCK(sc) rm_wunlock(&(sc)->lock) #define OVPN_ASSERT(sc) rm_assert(&(sc)->lock, RA_LOCKED) #define OVPN_RASSERT(sc) rm_assert(&(sc)->lock, RA_RLOCKED) #define OVPN_WASSERT(sc) rm_assert(&(sc)->lock, RA_WLOCKED) #define OVPN_UNLOCK_ASSERT(sc) 
rm_assert(&(sc)->lock, RA_UNLOCKED) #define OVPN_COUNTER(sc, name) \ ((sc)->counters[offsetof(struct ovpn_counters, name)/sizeof(uint64_t)]) #define OVPN_PEER_COUNTER(peer, name) \ ((peer)->counters[offsetof(struct ovpn_peer_counters, name) / \ sizeof(uint64_t)]) #define OVPN_COUNTER_ADD(sc, name, val) \ counter_u64_add(OVPN_COUNTER(sc, name), val) #define OVPN_PEER_COUNTER_ADD(p, name, val) \ counter_u64_add(OVPN_PEER_COUNTER(p, name), val) #define TO_IN(x) ((struct sockaddr_in *)(x)) #define TO_IN6(x) ((struct sockaddr_in6 *)(x)) SDT_PROVIDER_DEFINE(if_ovpn); SDT_PROBE_DEFINE1(if_ovpn, tx, transmit, start, "struct mbuf *"); SDT_PROBE_DEFINE2(if_ovpn, tx, route, ip4, "struct in_addr *", "struct ovpn_kpeer *"); SDT_PROBE_DEFINE2(if_ovpn, tx, route, ip6, "struct in6_addr *", "struct ovpn_kpeer *"); static const char ovpnname[] = "ovpn"; static const char ovpngroupname[] = "openvpn"; static MALLOC_DEFINE(M_OVPN, ovpnname, "OpenVPN DCO Interface"); SYSCTL_DECL(_net_link); static SYSCTL_NODE(_net_link, IFT_OTHER, openvpn, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "OpenVPN DCO Interface"); VNET_DEFINE_STATIC(int, replay_protection) = 0; #define V_replay_protection VNET(replay_protection) SYSCTL_INT(_net_link_openvpn, OID_AUTO, replay_protection, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(replay_protection), 0, "Validate sequence numbers"); VNET_DEFINE_STATIC(int, async_crypto); #define V_async_crypto VNET(async_crypto) SYSCTL_INT(_net_link_openvpn, OID_AUTO, async_crypto, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(async_crypto), 0, "Use asynchronous mode to parallelize crypto jobs."); VNET_DEFINE_STATIC(int, async_netisr_queue); #define V_async_netisr_queue VNET(async_netisr_queue) SYSCTL_INT(_net_link_openvpn, OID_AUTO, netisr_queue, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(async_netisr_queue), 0, "Use netisr_queue() rather than netisr_dispatch()."); static int ovpn_peer_compare(struct ovpn_kpeer *a, struct ovpn_kpeer *b) { return (a->peerid - b->peerid); } static struct ovpn_kpeer * ovpn_find_peer(struct ovpn_softc *sc, uint32_t peerid) { struct ovpn_kpeer p; OVPN_ASSERT(sc); p.peerid = peerid; return (RB_FIND(ovpn_kpeers, &sc->peers, &p)); } static struct ovpn_kpeer * ovpn_find_only_peer(struct ovpn_softc *sc) { OVPN_ASSERT(sc); return (RB_ROOT(&sc->peers)); } static uint16_t ovpn_get_port(struct sockaddr_storage *s) { switch (s->ss_family) { case AF_INET: { struct sockaddr_in *in = (struct sockaddr_in *)s; return (in->sin_port); } case AF_INET6: { struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)s; return (in6->sin6_port); } default: panic("Unsupported address family %d", s->ss_family); } } static int ovpn_nvlist_to_sockaddr(const nvlist_t *nvl, struct sockaddr_storage *sa) { int af; if (! nvlist_exists_number(nvl, "af")) return (EINVAL); if (! nvlist_exists_binary(nvl, "address")) return (EINVAL); if (! 
nvlist_exists_number(nvl, "port")) return (EINVAL); af = nvlist_get_number(nvl, "af"); switch (af) { #ifdef INET case AF_INET: { struct sockaddr_in *in = (struct sockaddr_in *)sa; size_t len; const void *addr = nvlist_get_binary(nvl, "address", &len); in->sin_family = af; if (len != sizeof(in->sin_addr)) return (EINVAL); memcpy(&in->sin_addr, addr, sizeof(in->sin_addr)); in->sin_port = nvlist_get_number(nvl, "port"); break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)sa; size_t len; const void *addr = nvlist_get_binary(nvl, "address", &len); in6->sin6_family = af; if (len != sizeof(in6->sin6_addr)) return (EINVAL); memcpy(&in6->sin6_addr, addr, sizeof(in6->sin6_addr)); in6->sin6_port = nvlist_get_number(nvl, "port"); break; } #endif default: return (EINVAL); } return (0); } static bool ovpn_has_peers(struct ovpn_softc *sc) { OVPN_ASSERT(sc); return (sc->peercount > 0); } static void ovpn_rele_so(struct ovpn_softc *sc, struct ovpn_kpeer *peer) { bool has_peers; OVPN_WASSERT(sc); if (sc->so == NULL) return; has_peers = ovpn_has_peers(sc); /* Only remove the tunnel function if we're releasing the socket for * the last peer. */ if (! has_peers) (void)udp_set_kernel_tunneling(sc->so, NULL, NULL, NULL); sorele(sc->so); if (! has_peers) sc->so = NULL; } static void ovpn_notify_del_peer(struct ovpn_softc *sc, struct ovpn_kpeer *peer) { struct ovpn_notification *n; OVPN_WASSERT(sc); n = malloc(sizeof(*n), M_OVPN, M_NOWAIT); if (n == NULL) return; n->peerid = peer->peerid; n->type = OVPN_NOTIF_DEL_PEER; n->del_reason = peer->del_reason; n->counters.pkt_in = counter_u64_fetch(OVPN_PEER_COUNTER(peer, pkt_in)); n->counters.pkt_out = counter_u64_fetch(OVPN_PEER_COUNTER(peer, pkt_out)); n->counters.bytes_in = counter_u64_fetch(OVPN_PEER_COUNTER(peer, bytes_in)); n->counters.bytes_out = counter_u64_fetch(OVPN_PEER_COUNTER(peer, bytes_out)); if (buf_ring_enqueue(sc->notifring, n) != 0) { free(n, M_OVPN); } else if (sc->so != NULL) { /* Wake up userspace */ sc->so->so_error = EAGAIN; sorwakeup(sc->so); sowwakeup(sc->so); } } static void ovpn_notify_key_rotation(struct ovpn_softc *sc, struct ovpn_kpeer *peer) { struct ovpn_notification *n; n = malloc(sizeof(*n), M_OVPN, M_NOWAIT | M_ZERO); if (n == NULL) return; n->peerid = peer->peerid; n->type = OVPN_NOTIF_ROTATE_KEY; if (buf_ring_enqueue(sc->notifring, n) != 0) { free(n, M_OVPN); } else if (sc->so != NULL) { /* Wake up userspace */ sc->so->so_error = EAGAIN; sorwakeup(sc->so); sowwakeup(sc->so); } } static void ovpn_peer_release_ref(struct ovpn_kpeer *peer, bool locked) { struct ovpn_softc *sc; CURVNET_ASSERT_SET(); atomic_add_int(&peer->refcount, -1); if (atomic_load_int(&peer->refcount) > 0) return; sc = peer->sc; if (! locked) { OVPN_WLOCK(sc); /* Might have changed before we acquired the lock. */ if (atomic_load_int(&peer->refcount) > 0) { OVPN_WUNLOCK(sc); return; } } OVPN_ASSERT(sc); /* The peer should have been removed from the list already. */ MPASS(ovpn_find_peer(sc, peer->peerid) == NULL); ovpn_notify_del_peer(sc, peer); for (int i = 0; i < 2; i++) { ovpn_free_kkey_dir(peer->keys[i].encrypt); ovpn_free_kkey_dir(peer->keys[i].decrypt); } ovpn_rele_so(sc, peer); callout_stop(&peer->ping_send); callout_stop(&peer->ping_rcv); uma_zfree_pcpu(pcpu_zone_4, peer->last_active); free(peer, M_OVPN); if (! 
locked) OVPN_WUNLOCK(sc); } static int ovpn_new_peer(struct ifnet *ifp, const nvlist_t *nvl) { #ifdef INET6 struct epoch_tracker et; #endif struct sockaddr_storage remote; struct ovpn_kpeer *peer = NULL; struct file *fp = NULL; - struct sockaddr *name = NULL; struct ovpn_softc *sc = ifp->if_softc; struct thread *td = curthread; struct socket *so = NULL; int fd; uint32_t peerid; int ret = 0; if (nvl == NULL) return (EINVAL); if (! nvlist_exists_number(nvl, "peerid")) return (EINVAL); if (! nvlist_exists_number(nvl, "fd")) return (EINVAL); if (! nvlist_exists_nvlist(nvl, "remote")) return (EINVAL); peerid = nvlist_get_number(nvl, "peerid"); ret = ovpn_nvlist_to_sockaddr(nvlist_get_nvlist(nvl, "remote"), &remote); if (ret != 0) return (ret); fd = nvlist_get_number(nvl, "fd"); /* Look up the userspace process and use the fd to find the socket. */ ret = getsock(td, fd, &cap_connect_rights, &fp); if (ret != 0) return (ret); so = fp->f_data; peer = malloc(sizeof(*peer), M_OVPN, M_WAITOK | M_ZERO); peer->peerid = peerid; peer->sc = sc; peer->refcount = 1; peer->last_active = uma_zalloc_pcpu(pcpu_zone_4, M_WAITOK | M_ZERO); COUNTER_ARRAY_ALLOC(peer->counters, OVPN_PEER_COUNTER_SIZE, M_WAITOK); if (nvlist_exists_binary(nvl, "vpn_ipv4")) { size_t len; const void *addr = nvlist_get_binary(nvl, "vpn_ipv4", &len); if (len != sizeof(peer->vpn4)) { ret = EINVAL; goto error; } memcpy(&peer->vpn4, addr, len); } if (nvlist_exists_binary(nvl, "vpn_ipv6")) { size_t len; const void *addr = nvlist_get_binary(nvl, "vpn_ipv6", &len); if (len != sizeof(peer->vpn6)) { ret = EINVAL; goto error; } memcpy(&peer->vpn6, addr, len); } callout_init_rm(&peer->ping_send, &sc->lock, CALLOUT_SHAREDLOCK); callout_init_rm(&peer->ping_rcv, &sc->lock, 0); - ret = so->so_proto->pr_sockaddr(so, &name); + peer->local.ss_len = sizeof(peer->local); + ret = sosockaddr(so, (struct sockaddr *)&peer->local); if (ret) goto error; - if (ovpn_get_port((struct sockaddr_storage *)name) == 0) { + if (ovpn_get_port(&peer->local) == 0) { ret = EINVAL; goto error; } - if (name->sa_family != remote.ss_family) { + if (peer->local.ss_family != remote.ss_family) { ret = EINVAL; goto error; } - memcpy(&peer->local, name, name->sa_len); memcpy(&peer->remote, &remote, sizeof(remote)); - free(name, M_SONAME); - name = NULL; if (peer->local.ss_family == AF_INET6 && IN6_IS_ADDR_V4MAPPED(&TO_IN6(&peer->remote)->sin6_addr)) { /* V4 mapped address, so treat this as v4, not v6. */ in6_sin6_2_sin_in_sock((struct sockaddr *)&peer->local); in6_sin6_2_sin_in_sock((struct sockaddr *)&peer->remote); } #ifdef INET6 if (peer->local.ss_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&TO_IN6(&peer->local)->sin6_addr)) { NET_EPOCH_ENTER(et); ret = in6_selectsrc_addr(curthread->td_proc->p_fibnum, &TO_IN6(&peer->remote)->sin6_addr, 0, NULL, &TO_IN6(&peer->local)->sin6_addr, NULL); NET_EPOCH_EXIT(et); if (ret != 0) { goto error; } } #endif OVPN_WLOCK(sc); /* Disallow peer id re-use. */ if (ovpn_find_peer(sc, peerid) != NULL) { ret = EEXIST; goto error_locked; } /* Make sure this is really a UDP socket. */ if (so->so_type != SOCK_DGRAM || so->so_proto->pr_type != SOCK_DGRAM) { ret = EPROTOTYPE; goto error_locked; } /* Must be the same socket as for other peers on this interface. */ if (sc->so != NULL && so != sc->so) goto error_locked; if (sc->so == NULL) sc->so = so; /* Insert the peer into the list. 
*/ RB_INSERT(ovpn_kpeers, &sc->peers, peer); sc->peercount++; soref(sc->so); ret = udp_set_kernel_tunneling(sc->so, ovpn_udp_input, NULL, sc); if (ret == EBUSY) { /* Fine, another peer already set the input function. */ ret = 0; } if (ret != 0) { RB_REMOVE(ovpn_kpeers, &sc->peers, peer); sc->peercount--; goto error_locked; } OVPN_WUNLOCK(sc); goto done; error_locked: OVPN_WUNLOCK(sc); error: - free(name, M_SONAME); COUNTER_ARRAY_FREE(peer->counters, OVPN_PEER_COUNTER_SIZE); uma_zfree_pcpu(pcpu_zone_4, peer->last_active); free(peer, M_OVPN); done: if (fp != NULL) fdrop(fp, td); return (ret); } static int _ovpn_del_peer(struct ovpn_softc *sc, struct ovpn_kpeer *peer) { struct ovpn_kpeer *tmp __diagused; OVPN_WASSERT(sc); CURVNET_ASSERT_SET(); MPASS(RB_FIND(ovpn_kpeers, &sc->peers, peer) == peer); tmp = RB_REMOVE(ovpn_kpeers, &sc->peers, peer); MPASS(tmp != NULL); sc->peercount--; ovpn_peer_release_ref(peer, true); return (0); } static int ovpn_del_peer(struct ifnet *ifp, nvlist_t *nvl) { struct ovpn_softc *sc = ifp->if_softc; struct ovpn_kpeer *peer; uint32_t peerid; int ret; OVPN_WASSERT(sc); if (nvl == NULL) return (EINVAL); if (! nvlist_exists_number(nvl, "peerid")) return (EINVAL); peerid = nvlist_get_number(nvl, "peerid"); peer = ovpn_find_peer(sc, peerid); if (peer == NULL) return (ENOENT); peer->del_reason = OVPN_DEL_REASON_REQUESTED; ret = _ovpn_del_peer(sc, peer); return (ret); } static int ovpn_create_kkey_dir(struct ovpn_kkey_dir **kdirp, const nvlist_t *nvl) { struct crypto_session_params csp; struct ovpn_kkey_dir *kdir; const char *ciphername; enum ovpn_key_cipher cipher; const void *key, *iv; size_t keylen = 0, ivlen = 0; int error; if (! nvlist_exists_string(nvl, "cipher")) return (EINVAL); ciphername = nvlist_get_string(nvl, "cipher"); if (strcmp(ciphername, "none") == 0) cipher = OVPN_CIPHER_ALG_NONE; else if (strcmp(ciphername, "AES-256-GCM") == 0 || strcmp(ciphername, "AES-192-GCM") == 0 || strcmp(ciphername, "AES-128-GCM") == 0) cipher = OVPN_CIPHER_ALG_AES_GCM; else if (strcmp(ciphername, "CHACHA20-POLY1305") == 0) cipher = OVPN_CIPHER_ALG_CHACHA20_POLY1305; else return (EINVAL); if (cipher != OVPN_CIPHER_ALG_NONE) { if (! nvlist_exists_binary(nvl, "key")) return (EINVAL); key = nvlist_get_binary(nvl, "key", &keylen); if (keylen > sizeof(kdir->key)) return (E2BIG); if (! 
nvlist_exists_binary(nvl, "iv")) return (EINVAL); iv = nvlist_get_binary(nvl, "iv", &ivlen); if (ivlen != 8) return (E2BIG); } kdir = malloc(sizeof(struct ovpn_kkey_dir), M_OVPN, M_WAITOK | M_ZERO); kdir->cipher = cipher; kdir->keylen = keylen; kdir->tx_seq = 1; memcpy(kdir->key, key, keylen); kdir->noncelen = ivlen; memcpy(kdir->nonce, iv, ivlen); if (kdir->cipher != OVPN_CIPHER_ALG_NONE) { /* Crypto init */ bzero(&csp, sizeof(csp)); csp.csp_mode = CSP_MODE_AEAD; if (kdir->cipher == OVPN_CIPHER_ALG_CHACHA20_POLY1305) csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305; else csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16; csp.csp_flags |= CSP_F_SEPARATE_AAD; csp.csp_cipher_klen = kdir->keylen; csp.csp_cipher_key = kdir->key; csp.csp_ivlen = 96 / 8; error = crypto_newsession(&kdir->cryptoid, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); if (error) { free(kdir, M_OVPN); return (error); } } mtx_init(&kdir->replay_mtx, "if_ovpn rx replay", NULL, MTX_DEF); *kdirp = kdir; return (0); } static void ovpn_free_kkey_dir(struct ovpn_kkey_dir *kdir) { if (kdir == NULL) return; mtx_destroy(&kdir->replay_mtx); crypto_freesession(kdir->cryptoid); free(kdir, M_OVPN); } static int ovpn_set_key(struct ifnet *ifp, const nvlist_t *nvl) { struct ovpn_softc *sc = ifp->if_softc; struct ovpn_kkey_dir *enc, *dec; struct ovpn_kpeer *peer; int slot, keyid, peerid; int error; if (nvl == NULL) return (EINVAL); if (! nvlist_exists_number(nvl, "slot")) return (EINVAL); slot = nvlist_get_number(nvl, "slot"); if (! nvlist_exists_number(nvl, "keyid")) return (EINVAL); keyid = nvlist_get_number(nvl, "keyid"); if (! nvlist_exists_number(nvl, "peerid")) return (EINVAL); peerid = nvlist_get_number(nvl, "peerid"); if (slot != OVPN_KEY_SLOT_PRIMARY && slot != OVPN_KEY_SLOT_SECONDARY) return (EINVAL); if (! nvlist_exists_nvlist(nvl, "encrypt") || ! nvlist_exists_nvlist(nvl, "decrypt")) return (EINVAL); error = ovpn_create_kkey_dir(&enc, nvlist_get_nvlist(nvl, "encrypt")); if (error) return (error); error = ovpn_create_kkey_dir(&dec, nvlist_get_nvlist(nvl, "decrypt")); if (error) { ovpn_free_kkey_dir(enc); return (error); } OVPN_WLOCK(sc); peer = ovpn_find_peer(sc, peerid); if (peer == NULL) { ovpn_free_kkey_dir(dec); ovpn_free_kkey_dir(enc); OVPN_WUNLOCK(sc); return (ENOENT); } ovpn_free_kkey_dir(peer->keys[slot].encrypt); ovpn_free_kkey_dir(peer->keys[slot].decrypt); peer->keys[slot].encrypt = enc; peer->keys[slot].decrypt = dec; peer->keys[slot].keyid = keyid; peer->keys[slot].peerid = peerid; OVPN_WUNLOCK(sc); return (0); } static int ovpn_check_key(struct ovpn_softc *sc, struct ovpn_kpeer *peer, enum ovpn_key_slot slot) { OVPN_ASSERT(sc); if (peer->keys[slot].encrypt == NULL) return (ENOLINK); if (peer->keys[slot].decrypt == NULL) return (ENOLINK); return (0); } static int ovpn_start(struct ifnet *ifp) { struct ovpn_softc *sc = ifp->if_softc; OVPN_WLOCK(sc); ifp->if_flags |= IFF_UP; ifp->if_drv_flags |= IFF_DRV_RUNNING; if_link_state_change(ifp, LINK_STATE_UP); OVPN_WUNLOCK(sc); return (0); } static int ovpn_swap_keys(struct ifnet *ifp, nvlist_t *nvl) { struct ovpn_softc *sc = ifp->if_softc; struct ovpn_kpeer *peer; struct ovpn_kkey tmpkey; int error; if (nvl == NULL) return (EINVAL); if (! nvlist_exists_number(nvl, "peerid")) return (EINVAL); OVPN_WLOCK(sc); peer = ovpn_find_peer(sc, nvlist_get_number(nvl, "peerid")); if (peer == NULL) { OVPN_WUNLOCK(sc); return (ENOENT); } /* Check that we have a second key to swap to. 
*/ error = ovpn_check_key(sc, peer, OVPN_KEY_SLOT_SECONDARY); if (error) { OVPN_WUNLOCK(sc); return (error); } tmpkey = peer->keys[0]; peer->keys[0] = peer->keys[1]; peer->keys[1] = tmpkey; OVPN_WUNLOCK(sc); return (0); } static int ovpn_del_key(struct ifnet *ifp, const nvlist_t *nvl) { enum ovpn_key_slot slot; struct ovpn_kpeer *peer; struct ovpn_softc *sc = ifp->if_softc; if (nvl == NULL) return (EINVAL); if (! nvlist_exists_number(nvl, "peerid")) return (EINVAL); if (! nvlist_exists_number(nvl, "slot")) return (EINVAL); slot = nvlist_get_number(nvl, "slot"); if (slot != OVPN_KEY_SLOT_PRIMARY && slot != OVPN_KEY_SLOT_SECONDARY) return (EINVAL); OVPN_WLOCK(sc); peer = ovpn_find_peer(sc, nvlist_get_number(nvl, "peerid")); if (peer == NULL) { OVPN_WUNLOCK(sc); return (ENOENT); } ovpn_free_kkey_dir(peer->keys[slot].encrypt); ovpn_free_kkey_dir(peer->keys[slot].decrypt); peer->keys[slot].encrypt = NULL; peer->keys[slot].decrypt = NULL; peer->keys[slot].keyid = 0; peer->keys[slot].peerid = 0; OVPN_WUNLOCK(sc); return (0); } static void ovpn_send_ping(void *arg) { static const uint8_t ping_str[] = { 0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb, 0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48 }; struct epoch_tracker et; struct ovpn_kpeer *peer = arg; struct ovpn_softc *sc = peer->sc; struct mbuf *m; OVPN_RASSERT(sc); /* Ensure we repeat! */ callout_reset(&peer->ping_send, peer->keepalive.interval * hz, ovpn_send_ping, peer); m = m_get2(sizeof(ping_str), M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return; m_copyback(m, 0, sizeof(ping_str), ping_str); m->m_len = m->m_pkthdr.len = sizeof(ping_str); CURVNET_SET(sc->ifp->if_vnet); NET_EPOCH_ENTER(et); (void)ovpn_transmit_to_peer(sc->ifp, m, peer, NULL); NET_EPOCH_EXIT(et); CURVNET_RESTORE(); } static void ovpn_timeout(void *arg) { struct ovpn_kpeer *peer = arg; struct ovpn_softc *sc = peer->sc; uint32_t last, _last_active; int ret __diagused; int cpu; OVPN_WASSERT(sc); last = 0; CPU_FOREACH(cpu) { _last_active = *zpcpu_get_cpu(peer->last_active, cpu); if (_last_active > last) last = _last_active; } if (last + peer->keepalive.timeout > time_uptime) { callout_reset(&peer->ping_rcv, (peer->keepalive.timeout - (time_uptime - last)) * hz, ovpn_timeout, peer); return; } CURVNET_SET(sc->ifp->if_vnet); peer->del_reason = OVPN_DEL_REASON_TIMEOUT; ret = _ovpn_del_peer(sc, peer); MPASS(ret == 0); CURVNET_RESTORE(); } static int ovpn_set_peer(struct ifnet *ifp, const nvlist_t *nvl) { struct ovpn_softc *sc = ifp->if_softc; struct ovpn_kpeer *peer; if (nvl == NULL) return (EINVAL); if (! nvlist_exists_number(nvl, "interval") || ! nvlist_exists_number(nvl, "timeout") || ! nvlist_exists_number(nvl, "peerid")) return (EINVAL); OVPN_WLOCK(sc); peer = ovpn_find_peer(sc, nvlist_get_number(nvl, "peerid")); if (peer == NULL) { OVPN_WUNLOCK(sc); return (ENOENT); } peer->keepalive.interval = nvlist_get_number(nvl, "interval"); peer->keepalive.timeout = nvlist_get_number(nvl, "timeout"); if (peer->keepalive.interval > 0) callout_reset(&peer->ping_send, peer->keepalive.interval * hz, ovpn_send_ping, peer); if (peer->keepalive.timeout > 0) callout_reset(&peer->ping_rcv, peer->keepalive.timeout * hz, ovpn_timeout, peer); OVPN_WUNLOCK(sc); return (0); } static int ovpn_set_ifmode(struct ifnet *ifp, const nvlist_t *nvl) { struct ovpn_softc *sc = ifp->if_softc; int ifmode; if (nvl == NULL) return (EINVAL); if (! 
nvlist_exists_number(nvl, "ifmode") ) return (EINVAL); ifmode = nvlist_get_number(nvl, "ifmode"); OVPN_WLOCK(sc); /* deny this if UP */ if (ifp->if_flags & IFF_UP) { OVPN_WUNLOCK(sc); return (EBUSY); } switch (ifmode & ~IFF_MULTICAST) { case IFF_POINTOPOINT: case IFF_BROADCAST: ifp->if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST); ifp->if_flags |= ifmode; break; default: OVPN_WUNLOCK(sc); return (EINVAL); } OVPN_WUNLOCK(sc); return (0); } static int ovpn_ioctl_set(struct ifnet *ifp, struct ifdrv *ifd) { struct ovpn_softc *sc = ifp->if_softc; uint8_t *buf = NULL; nvlist_t *nvl = NULL; int ret; if (ifd->ifd_len != 0) { if (ifd->ifd_len > OVPN_MAX_REQUEST_SIZE) return (E2BIG); buf = malloc(ifd->ifd_len, M_OVPN, M_WAITOK); ret = copyin(ifd->ifd_data, buf, ifd->ifd_len); if (ret != 0) { free(buf, M_OVPN); return (ret); } nvl = nvlist_unpack(buf, ifd->ifd_len, 0); free(buf, M_OVPN); if (nvl == NULL) { return (EINVAL); } } switch (ifd->ifd_cmd) { case OVPN_NEW_PEER: ret = ovpn_new_peer(ifp, nvl); break; case OVPN_DEL_PEER: OVPN_WLOCK(sc); ret = ovpn_del_peer(ifp, nvl); OVPN_WUNLOCK(sc); break; case OVPN_NEW_KEY: ret = ovpn_set_key(ifp, nvl); break; case OVPN_START_VPN: ret = ovpn_start(ifp); break; case OVPN_SWAP_KEYS: ret = ovpn_swap_keys(ifp, nvl); break; case OVPN_DEL_KEY: ret = ovpn_del_key(ifp, nvl); break; case OVPN_SET_PEER: ret = ovpn_set_peer(ifp, nvl); break; case OVPN_SET_IFMODE: ret = ovpn_set_ifmode(ifp, nvl); break; default: ret = ENOTSUP; } nvlist_destroy(nvl); return (ret); } static int ovpn_add_counters(nvlist_t *parent, const char *name, counter_u64_t in, counter_u64_t out) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (ENOMEM); nvlist_add_number(nvl, "in", counter_u64_fetch(in)); nvlist_add_number(nvl, "out", counter_u64_fetch(out)); nvlist_add_nvlist(parent, name, nvl); nvlist_destroy(nvl); return (0); } static int ovpn_get_stats(struct ovpn_softc *sc, nvlist_t **onvl) { nvlist_t *nvl; int ret; nvl = nvlist_create(0); if (nvl == NULL) return (ENOMEM); #define OVPN_COUNTER_OUT(name, in, out) \ do { \ ret = ovpn_add_counters(nvl, name, OVPN_COUNTER(sc, in), \ OVPN_COUNTER(sc, out)); \ if (ret != 0) \ goto error; \ } while(0) OVPN_COUNTER_OUT("lost_ctrl", lost_ctrl_pkts_in, lost_ctrl_pkts_out); OVPN_COUNTER_OUT("lost_data", lost_data_pkts_in, lost_data_pkts_out); OVPN_COUNTER_OUT("nomem_data", nomem_data_pkts_in, nomem_data_pkts_out); OVPN_COUNTER_OUT("data", received_data_pkts, sent_data_pkts); OVPN_COUNTER_OUT("ctrl", received_ctrl_pkts, sent_ctrl_pkts); OVPN_COUNTER_OUT("tunnel", tunnel_bytes_received, tunnel_bytes_received); OVPN_COUNTER_OUT("transport", transport_bytes_received, transport_bytes_received); #undef OVPN_COUNTER_OUT *onvl = nvl; return (0); error: nvlist_destroy(nvl); return (ret); } static int ovpn_get_peer_stats(struct ovpn_softc *sc, nvlist_t **nvl) { struct ovpn_kpeer *peer; nvlist_t *nvpeer = NULL; int ret; OVPN_RLOCK_TRACKER; *nvl = nvlist_create(0); if (*nvl == NULL) return (ENOMEM); #define OVPN_PEER_COUNTER_OUT(name, in, out) \ do { \ ret = ovpn_add_counters(nvpeer, name, \ OVPN_PEER_COUNTER(peer, in), OVPN_PEER_COUNTER(peer, out)); \ if (ret != 0) \ goto error; \ } while(0) OVPN_RLOCK(sc); RB_FOREACH(peer, ovpn_kpeers, &sc->peers) { nvpeer = nvlist_create(0); if (nvpeer == NULL) { OVPN_RUNLOCK(sc); nvlist_destroy(*nvl); *nvl = NULL; return (ENOMEM); } nvlist_add_number(nvpeer, "peerid", peer->peerid); OVPN_PEER_COUNTER_OUT("packets", pkt_in, pkt_out); OVPN_PEER_COUNTER_OUT("bytes", bytes_in, bytes_out); 
nvlist_append_nvlist_array(*nvl, "peers", nvpeer); nvlist_destroy(nvpeer); } #undef OVPN_PEER_COUNTER_OUT OVPN_RUNLOCK(sc); return (0); error: nvlist_destroy(nvpeer); nvlist_destroy(*nvl); *nvl = NULL; return (ret); } static int ovpn_poll_pkt(struct ovpn_softc *sc, nvlist_t **onvl) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (ENOMEM); nvlist_add_number(nvl, "pending", buf_ring_count(sc->notifring)); *onvl = nvl; return (0); } static void ovpn_notif_add_counters(nvlist_t *parent, struct ovpn_notification *n) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return; nvlist_add_number(nvl, "in", n->counters.pkt_in); nvlist_add_number(nvl, "out", n->counters.pkt_out); nvlist_add_nvlist(parent, "packets", nvl); nvlist_destroy(nvl); nvl = nvlist_create(0); if (nvl == NULL) return; nvlist_add_number(nvl, "in", n->counters.bytes_in); nvlist_add_number(nvl, "out", n->counters.bytes_out); nvlist_add_nvlist(parent, "bytes", nvl); nvlist_destroy(nvl); } static int opvn_get_pkt(struct ovpn_softc *sc, nvlist_t **onvl) { struct ovpn_notification *n; nvlist_t *nvl; /* Check if we have notifications pending. */ n = buf_ring_dequeue_mc(sc->notifring); if (n == NULL) return (ENOENT); nvl = nvlist_create(0); if (nvl == NULL) { free(n, M_OVPN); return (ENOMEM); } nvlist_add_number(nvl, "peerid", n->peerid); nvlist_add_number(nvl, "notification", n->type); if (n->type == OVPN_NOTIF_DEL_PEER) { nvlist_add_number(nvl, "del_reason", n->del_reason); /* No error handling, because we want to send the notification * even if we can't attach the counters. */ ovpn_notif_add_counters(nvl, n); } free(n, M_OVPN); *onvl = nvl; return (0); } static int ovpn_ioctl_get(struct ifnet *ifp, struct ifdrv *ifd) { struct ovpn_softc *sc = ifp->if_softc; nvlist_t *nvl = NULL; int error; switch (ifd->ifd_cmd) { case OVPN_GET_STATS: error = ovpn_get_stats(sc, &nvl); break; case OVPN_GET_PEER_STATS: error = ovpn_get_peer_stats(sc, &nvl); break; case OVPN_POLL_PKT: error = ovpn_poll_pkt(sc, &nvl); break; case OVPN_GET_PKT: error = opvn_get_pkt(sc, &nvl); break; default: error = ENOTSUP; break; } if (error == 0) { void *packed = NULL; size_t len; MPASS(nvl != NULL); packed = nvlist_pack(nvl, &len); if (! 
packed) { nvlist_destroy(nvl); return (ENOMEM); } if (len > ifd->ifd_len) { free(packed, M_NVLIST); nvlist_destroy(nvl); return (ENOSPC); } error = copyout(packed, ifd->ifd_data, len); ifd->ifd_len = len; free(packed, M_NVLIST); nvlist_destroy(nvl); } return (error); } static int ovpn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifdrv *ifd; int error; CURVNET_ASSERT_SET(); switch (cmd) { case SIOCSDRVSPEC: case SIOCGDRVSPEC: error = priv_check(curthread, PRIV_NET_OVPN); if (error) return (error); break; } switch (cmd) { case SIOCSDRVSPEC: ifd = (struct ifdrv *)data; error = ovpn_ioctl_set(ifp, ifd); break; case SIOCGDRVSPEC: ifd = (struct ifdrv *)data; error = ovpn_ioctl_get(ifp, ifd); break; case SIOCSIFMTU: { struct ifreq *ifr = (struct ifreq *)data; if (ifr->ifr_mtu < OVPN_MTU_MIN || ifr->ifr_mtu > OVPN_MTU_MAX) return (EINVAL); ifp->if_mtu = ifr->ifr_mtu; return (0); } case SIOCSIFADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFMTU: case SIOCSIFFLAGS: return (0); default: error = EINVAL; } return (error); } static int ovpn_encrypt_tx_cb(struct cryptop *crp) { struct epoch_tracker et; struct ovpn_kpeer *peer = crp->crp_opaque; struct ovpn_softc *sc = peer->sc; struct mbuf *m = crp->crp_buf.cb_mbuf; int tunnel_len; int ret; CURVNET_SET(sc->ifp->if_vnet); NET_EPOCH_ENTER(et); if (crp->crp_etype != 0) { crypto_freereq(crp); ovpn_peer_release_ref(peer, false); NET_EPOCH_EXIT(et); CURVNET_RESTORE(); OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1); m_freem(m); return (0); } MPASS(crp->crp_buf.cb_type == CRYPTO_BUF_MBUF); tunnel_len = m->m_pkthdr.len - sizeof(struct ovpn_wire_header); ret = ovpn_encap(sc, peer->peerid, m); if (ret == 0) { OVPN_COUNTER_ADD(sc, sent_data_pkts, 1); OVPN_COUNTER_ADD(sc, tunnel_bytes_sent, tunnel_len); } crypto_freereq(crp); ovpn_peer_release_ref(peer, false); NET_EPOCH_EXIT(et); CURVNET_RESTORE(); return (0); } static void ovpn_finish_rx(struct ovpn_softc *sc, struct mbuf *m, struct ovpn_kpeer *peer, struct ovpn_kkey *key, uint32_t seq, struct rm_priotracker *_ovpn_lock_trackerp) { uint32_t af; OVPN_RASSERT(sc); NET_EPOCH_ASSERT(); /* Replay protection. */ if (V_replay_protection && ! ovpn_check_replay(key->decrypt, seq)) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1); m_freem(m); return; } critical_enter(); *zpcpu_get(peer->last_active) = time_uptime; critical_exit(); OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, received_data_pkts, 1); OVPN_COUNTER_ADD(sc, tunnel_bytes_received, m->m_pkthdr.len); OVPN_PEER_COUNTER_ADD(peer, pkt_in, 1); OVPN_PEER_COUNTER_ADD(peer, bytes_in, m->m_pkthdr.len); /* Receive the packet on our interface. */ m->m_pkthdr.rcvif = sc->ifp; /* Clear checksum flags in case the real hardware set them. */ m->m_pkthdr.csum_flags = 0; /* Clear mbuf tags & flags */ m_tag_delete_nonpersistent(m); m_clrprotoflags(m); /* Ensure we can read the first byte. */ m = m_pullup(m, 1); if (m == NULL) { OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1); return; } /* * Check for address family, and disregard any control packets (e.g. * keepalive). */ af = ovpn_get_af(m); if (af != 0) { BPF_MTAP2(sc->ifp, &af, sizeof(af), m); if (V_async_netisr_queue) netisr_queue(af == AF_INET ? NETISR_IP : NETISR_IPV6, m); else netisr_dispatch(af == AF_INET ? 
NETISR_IP : NETISR_IPV6, m); } else { OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1); m_freem(m); } } static struct ovpn_kkey * ovpn_find_key(struct ovpn_softc *sc, struct ovpn_kpeer *peer, const struct ovpn_wire_header *ohdr) { struct ovpn_kkey *key = NULL; uint8_t keyid; OVPN_RASSERT(sc); keyid = (ntohl(ohdr->opcode) >> 24) & 0x07; if (peer->keys[0].keyid == keyid) key = &peer->keys[0]; else if (peer->keys[1].keyid == keyid) key = &peer->keys[1]; return (key); } static int ovpn_decrypt_rx_cb(struct cryptop *crp) { struct epoch_tracker et; struct ovpn_softc *sc = crp->crp_opaque; struct mbuf *m = crp->crp_buf.cb_mbuf; struct ovpn_kkey *key; struct ovpn_kpeer *peer; struct ovpn_wire_header *ohdr; uint32_t peerid; OVPN_RLOCK_TRACKER; OVPN_RLOCK(sc); MPASS(crp->crp_buf.cb_type == CRYPTO_BUF_MBUF); if (crp->crp_etype != 0) { crypto_freereq(crp); atomic_add_int(&sc->refcount, -1); OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1); OVPN_RUNLOCK(sc); m_freem(m); return (0); } CURVNET_SET(sc->ifp->if_vnet); ohdr = mtodo(m, sizeof(struct udphdr)); peerid = ntohl(ohdr->opcode) & 0x00ffffff; peer = ovpn_find_peer(sc, peerid); if (peer == NULL) { /* No such peer. Drop packet. */ crypto_freereq(crp); atomic_add_int(&sc->refcount, -1); OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1); m_freem(m); CURVNET_RESTORE(); return (0); } key = ovpn_find_key(sc, peer, ohdr); if (key == NULL) { crypto_freereq(crp); atomic_add_int(&sc->refcount, -1); /* * Has this key been removed between us starting the decrypt * and finishing it? */ OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1); m_freem(m); CURVNET_RESTORE(); return (0); } /* Now remove the outer headers */ m_adj_decap(m, sizeof(struct udphdr) + sizeof(struct ovpn_wire_header)); NET_EPOCH_ENTER(et); ovpn_finish_rx(sc, m, peer, key, ntohl(ohdr->seq), _ovpn_lock_trackerp); NET_EPOCH_EXIT(et); OVPN_UNLOCK_ASSERT(sc); CURVNET_RESTORE(); crypto_freereq(crp); atomic_add_int(&sc->refcount, -1); return (0); } static int ovpn_get_af(struct mbuf *m) { struct ip *ip; struct ip6_hdr *ip6; /* * We should pullup, but we're only interested in the first byte, so * that'll always be contiguous. */ ip = mtod(m, struct ip *); if (ip->ip_v == IPVERSION) return (AF_INET); ip6 = mtod(m, struct ip6_hdr *); if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION) return (AF_INET6); return (0); } #ifdef INET static struct ovpn_kpeer * ovpn_find_peer_by_ip(struct ovpn_softc *sc, const struct in_addr addr) { struct ovpn_kpeer *peer = NULL; OVPN_ASSERT(sc); /* TODO: Add a second RB so we can look up by IP. */ RB_FOREACH(peer, ovpn_kpeers, &sc->peers) { if (addr.s_addr == peer->vpn4.s_addr) return (peer); } return (peer); } #endif #ifdef INET6 static struct ovpn_kpeer * ovpn_find_peer_by_ip6(struct ovpn_softc *sc, const struct in6_addr *addr) { struct ovpn_kpeer *peer = NULL; OVPN_ASSERT(sc); /* TODO: Add a third RB so we can look up by IPv6 address. */ RB_FOREACH(peer, ovpn_kpeers, &sc->peers) { if (memcmp(addr, &peer->vpn6, sizeof(*addr)) == 0) return (peer); } return (peer); } #endif static struct ovpn_kpeer * ovpn_route_peer(struct ovpn_softc *sc, struct mbuf **m0, const struct sockaddr *dst) { struct ovpn_kpeer *peer = NULL; int af; NET_EPOCH_ASSERT(); OVPN_ASSERT(sc); /* Shortcut if we're a client (or are a server and have only one client). 
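 * With more than one peer, the lookup below first matches the inner
 * destination address against each peer's configured vpn4/vpn6 address
 * and, failing that, performs a FIB lookup and matches the route's
 * gateway address instead.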
*/ if (sc->peercount == 1) return (ovpn_find_only_peer(sc)); if (dst != NULL) af = dst->sa_family; else af = ovpn_get_af(*m0); switch (af) { #ifdef INET case AF_INET: { const struct sockaddr_in *sa = (const struct sockaddr_in *)dst; struct nhop_object *nh; const struct in_addr *ip_dst; if (sa != NULL) { ip_dst = &sa->sin_addr; } else { struct ip *ip; *m0 = m_pullup(*m0, sizeof(struct ip)); if (*m0 == NULL) return (NULL); ip = mtod(*m0, struct ip *); ip_dst = &ip->ip_dst; } peer = ovpn_find_peer_by_ip(sc, *ip_dst); SDT_PROBE2(if_ovpn, tx, route, ip4, ip_dst, peer); if (peer == NULL) { nh = fib4_lookup(M_GETFIB(*m0), *ip_dst, 0, NHR_NONE, 0); if (nh && (nh->nh_flags & NHF_GATEWAY)) { peer = ovpn_find_peer_by_ip(sc, nh->gw4_sa.sin_addr); SDT_PROBE2(if_ovpn, tx, route, ip4, &nh->gw4_sa.sin_addr, peer); } } break; } #endif #ifdef INET6 case AF_INET6: { const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)dst; struct nhop_object *nh; const struct in6_addr *ip6_dst; if (sa6 != NULL) { ip6_dst = &sa6->sin6_addr; } else { struct ip6_hdr *ip6; *m0 = m_pullup(*m0, sizeof(struct ip6_hdr)); if (*m0 == NULL) return (NULL); ip6 = mtod(*m0, struct ip6_hdr *); ip6_dst = &ip6->ip6_dst; } peer = ovpn_find_peer_by_ip6(sc, ip6_dst); SDT_PROBE2(if_ovpn, tx, route, ip6, ip6_dst, peer); if (peer == NULL) { nh = fib6_lookup(M_GETFIB(*m0), ip6_dst, 0, NHR_NONE, 0); if (nh && (nh->nh_flags & NHF_GATEWAY)) { peer = ovpn_find_peer_by_ip6(sc, &nh->gw6_sa.sin6_addr); SDT_PROBE2(if_ovpn, tx, route, ip6, &nh->gw6_sa.sin6_addr, peer); } } break; } #endif } return (peer); } static int ovpn_transmit(struct ifnet *ifp, struct mbuf *m) { return (ifp->if_output(ifp, m, NULL, NULL)); } static int ovpn_transmit_to_peer(struct ifnet *ifp, struct mbuf *m, struct ovpn_kpeer *peer, struct rm_priotracker *_ovpn_lock_trackerp) { struct ovpn_wire_header *ohdr; struct ovpn_kkey *key; struct ovpn_softc *sc; struct cryptop *crp; uint32_t af, seq; uint64_t seq64; size_t len, ovpn_hdr_len; int tunnel_len; int ret; sc = ifp->if_softc; OVPN_RASSERT(sc); tunnel_len = m->m_pkthdr.len; key = &peer->keys[OVPN_KEY_SLOT_PRIMARY]; if (key->encrypt == NULL) { if (_ovpn_lock_trackerp != NULL) OVPN_RUNLOCK(sc); m_freem(m); return (ENOLINK); } af = ovpn_get_af(m); /* Don't capture control packets. */ if (af != 0) BPF_MTAP2(ifp, &af, sizeof(af), m); len = m->m_pkthdr.len; MPASS(len <= ifp->if_mtu); ovpn_hdr_len = sizeof(struct ovpn_wire_header); if (key->encrypt->cipher == OVPN_CIPHER_ALG_NONE) ovpn_hdr_len -= 16; /* No auth tag. */ M_PREPEND(m, ovpn_hdr_len, M_NOWAIT); if (m == NULL) { if (_ovpn_lock_trackerp != NULL) OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1); return (ENOBUFS); } ohdr = mtod(m, struct ovpn_wire_header *); ohdr->opcode = (OVPN_OP_DATA_V2 << OVPN_OP_SHIFT) | key->keyid; ohdr->opcode <<= 24; ohdr->opcode |= key->peerid; ohdr->opcode = htonl(ohdr->opcode); seq64 = atomic_fetchadd_64(&peer->keys[OVPN_KEY_SLOT_PRIMARY].encrypt->tx_seq, 1); if (seq64 == OVPN_SEQ_ROTATE) { ovpn_notify_key_rotation(sc, peer); } else if (seq64 > UINT32_MAX) { /* We've wrapped, give up on this packet. */ if (_ovpn_lock_trackerp != NULL) OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1); /* Let's avoid (very unlikely, but still) wraparounds of the * 64-bit counter taking us back to 0. 
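 * Pinning tx_seq at UINT32_MAX keeps every later transmit in this branch
 * (and thus failing) until userspace reacts to the OVPN_NOTIF_ROTATE_KEY
 * notification sent when half of the 32-bit sequence space was consumed
 * and rotates in a fresh key (a new key direction starts at tx_seq = 1).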
*/ atomic_store_64(&peer->keys[OVPN_KEY_SLOT_PRIMARY].encrypt->tx_seq, UINT32_MAX); return (ENOBUFS); } seq = htonl(seq64 & UINT32_MAX); ohdr->seq = seq; OVPN_PEER_COUNTER_ADD(peer, pkt_out, 1); OVPN_PEER_COUNTER_ADD(peer, bytes_out, len); if (key->encrypt->cipher == OVPN_CIPHER_ALG_NONE) { ret = ovpn_encap(sc, peer->peerid, m); if (_ovpn_lock_trackerp != NULL) OVPN_RUNLOCK(sc); if (ret == 0) { OVPN_COUNTER_ADD(sc, sent_data_pkts, 1); OVPN_COUNTER_ADD(sc, tunnel_bytes_sent, tunnel_len); } return (ret); } crp = crypto_getreq(key->encrypt->cryptoid, M_NOWAIT); if (crp == NULL) { if (_ovpn_lock_trackerp != NULL) OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1); m_freem(m); return (ENOBUFS); } /* Encryption covers only the payload, not the header. */ crp->crp_payload_start = sizeof(*ohdr); crp->crp_payload_length = len; crp->crp_op = CRYPTO_OP_ENCRYPT; /* * AAD data covers the ovpn_wire_header minus the auth * tag. */ crp->crp_aad_length = sizeof(*ohdr) - sizeof(ohdr->auth_tag); crp->crp_aad = ohdr; crp->crp_aad_start = 0; crp->crp_op |= CRYPTO_OP_COMPUTE_DIGEST; crp->crp_digest_start = offsetof(struct ovpn_wire_header, auth_tag); crp->crp_flags |= CRYPTO_F_IV_SEPARATE; memcpy(crp->crp_iv, &seq, sizeof(seq)); memcpy(crp->crp_iv + sizeof(seq), key->encrypt->nonce, key->encrypt->noncelen); crypto_use_mbuf(crp, m); crp->crp_flags |= CRYPTO_F_CBIFSYNC; crp->crp_callback = ovpn_encrypt_tx_cb; crp->crp_opaque = peer; atomic_add_int(&peer->refcount, 1); if (_ovpn_lock_trackerp != NULL) OVPN_RUNLOCK(sc); if (V_async_crypto) ret = crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED); else ret = crypto_dispatch(crp); if (ret) { OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1); } return (ret); } /* * Note: Expects to hold the read lock on entry, and will release it itself. */ static int ovpn_encap(struct ovpn_softc *sc, uint32_t peerid, struct mbuf *m) { struct udphdr *udp; struct ovpn_kpeer *peer; int len; OVPN_RLOCK_TRACKER; OVPN_RLOCK(sc); NET_EPOCH_ASSERT(); peer = ovpn_find_peer(sc, peerid); if (peer == NULL || sc->ifp->if_link_state != LINK_STATE_UP) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1); m_freem(m); return (ENETDOWN); } len = m->m_pkthdr.len; M_PREPEND(m, sizeof(struct udphdr), M_NOWAIT); if (m == NULL) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1); m_freem(m); return (ENOBUFS); } udp = mtod(m, struct udphdr *); MPASS(peer->local.ss_family == peer->remote.ss_family); udp->uh_sport = ovpn_get_port(&peer->local); udp->uh_dport = ovpn_get_port(&peer->remote); udp->uh_ulen = htons(sizeof(struct udphdr) + len); switch (peer->remote.ss_family) { #ifdef INET case AF_INET: { struct sockaddr_in *in_local = TO_IN(&peer->local); struct sockaddr_in *in_remote = TO_IN(&peer->remote); struct ip *ip; /* * This requires knowing the source IP, which we don't. Happily * we're allowed to keep this at 0, and the checksum won't do * anything the crypto won't already do. */ udp->uh_sum = 0; /* Set the checksum flags so we recalculate checksums. 
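 * CSUM_IP only covers the IPv4 header checksum; the UDP checksum stays
 * zero, which is permitted for IPv4. The IPv6 branch below instead fills
 * in the pseudo-header checksum and requests CSUM_UDP_IPV6.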
*/ m->m_pkthdr.csum_flags |= CSUM_IP; m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); M_PREPEND(m, sizeof(struct ip), M_NOWAIT); if (m == NULL) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1); return (ENOBUFS); } ip = mtod(m, struct ip *); ip->ip_tos = 0; ip->ip_len = htons(sizeof(struct ip) + sizeof(struct udphdr) + len); ip->ip_off = 0; ip->ip_ttl = V_ip_defttl; ip->ip_p = IPPROTO_UDP; ip->ip_sum = 0; if (in_local->sin_port != 0) ip->ip_src = in_local->sin_addr; else ip->ip_src.s_addr = INADDR_ANY; ip->ip_dst = in_remote->sin_addr; OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, transport_bytes_sent, m->m_pkthdr.len); return (ip_output(m, NULL, NULL, 0, NULL, NULL)); } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *in6_local = TO_IN6(&peer->local); struct sockaddr_in6 *in6_remote = TO_IN6(&peer->remote); struct ip6_hdr *ip6; M_PREPEND(m, sizeof(struct ip6_hdr), M_NOWAIT); if (m == NULL) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1); return (ENOBUFS); } m = m_pullup(m, sizeof(*ip6) + sizeof(*udp)); if (m == NULL) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1); return (ENOBUFS); } ip6 = mtod(m, struct ip6_hdr *); ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK; ip6->ip6_plen = htons(sizeof(*ip6) + sizeof(struct udphdr) + len); ip6->ip6_nxt = IPPROTO_UDP; ip6->ip6_hlim = V_ip6_defhlim; memcpy(&ip6->ip6_src, &in6_local->sin6_addr, sizeof(ip6->ip6_src)); memcpy(&ip6->ip6_dst, &in6_remote->sin6_addr, sizeof(ip6->ip6_dst)); udp = mtodo(m, sizeof(*ip6)); udp->uh_sum = in6_cksum_pseudo(ip6, m->m_pkthdr.len - sizeof(struct ip6_hdr), IPPROTO_UDP, 0); m->m_pkthdr.csum_flags |= CSUM_UDP_IPV6; m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, transport_bytes_sent, m->m_pkthdr.len); return (ip6_output(m, NULL, NULL, IPV6_UNSPECSRC, NULL, NULL, NULL)); } #endif default: panic("Unsupported address family %d", peer->remote.ss_family); } } static int ovpn_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { struct ovpn_softc *sc; struct ovpn_kpeer *peer; OVPN_RLOCK_TRACKER; sc = ifp->if_softc; OVPN_RLOCK(sc); SDT_PROBE1(if_ovpn, tx, transmit, start, m); if (__predict_false(ifp->if_link_state != LINK_STATE_UP)) { OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1); OVPN_RUNLOCK(sc); m_freem(m); return (ENETDOWN); } /** * Only obey 'dst' (i.e. the gateway) if no route is supplied. * That's our indication that we're being called through pf's route-to, * and we should route according to 'dst' instead. We can't do so * consistently, because the usual openvpn configuration sets the first * non-server IP in the subnet as the gateway. If we always use that * one we'd end up routing all traffic to the first client. * tl;dr: 'ro == NULL' tells us pf is doing a route-to, and then but * only then, we should treat 'dst' as the destination. */ peer = ovpn_route_peer(sc, &m, ro == NULL ? dst : NULL); if (peer == NULL) { /* No destination. */ OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1); OVPN_RUNLOCK(sc); m_freem(m); return (ENETDOWN); } return (ovpn_transmit_to_peer(ifp, m, peer, _ovpn_lock_trackerp)); } static bool ovpn_check_replay(struct ovpn_kkey_dir *key, uint32_t seq) { uint32_t d; mtx_lock(&key->replay_mtx); /* Sequence number must be strictly greater than rx_seq */ if (seq <= key->rx_seq) { mtx_unlock(&key->replay_mtx); return (false); } /* Large jump. The packet authenticated okay, so just accept that. 
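 * Smaller jumps are tracked in rx_window below: bit d set means sequence
 * number rx_seq + 1 + d has been seen. For example, starting from
 * rx_seq = 10 with an empty window, receiving 13 sets bit 2; receiving 11
 * then sets bit 0 and the window collapses to rx_seq = 11 with only bit 1
 * set.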
*/ if (seq > (key->rx_seq + (sizeof(key->rx_window) * 8))) { key->rx_seq = seq; key->rx_window = 0; mtx_unlock(&key->replay_mtx); return (true); } /* Happy case. */ if ((seq == key->rx_seq + 1) && key->rx_window == 0) { key->rx_seq++; mtx_unlock(&key->replay_mtx); return (true); } d = seq - key->rx_seq - 1; if (key->rx_window & ((uint64_t)1 << d)) { /* Dupe! */ mtx_unlock(&key->replay_mtx); return (false); } key->rx_window |= (uint64_t)1 << d; while (key->rx_window & 1) { key->rx_seq++; key->rx_window >>= 1; } mtx_unlock(&key->replay_mtx); return (true); } static struct ovpn_kpeer * ovpn_peer_from_mbuf(struct ovpn_softc *sc, struct mbuf *m, int off) { struct ovpn_wire_header ohdr; uint32_t peerid; const size_t hdrlen = sizeof(ohdr) - sizeof(ohdr.auth_tag); OVPN_RASSERT(sc); if (m_length(m, NULL) < (off + sizeof(struct udphdr) + hdrlen)) return (NULL); m_copydata(m, off + sizeof(struct udphdr), hdrlen, (caddr_t)&ohdr); peerid = ntohl(ohdr.opcode) & 0x00ffffff; return (ovpn_find_peer(sc, peerid)); } static bool ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp, const struct sockaddr *sa, void *ctx) { struct ovpn_softc *sc = ctx; struct ovpn_wire_header tmphdr; struct ovpn_wire_header *ohdr; struct udphdr *uhdr; struct ovpn_kkey *key; struct cryptop *crp; struct ovpn_kpeer *peer; size_t ohdrlen; int ret; uint8_t op; OVPN_RLOCK_TRACKER; M_ASSERTPKTHDR(m); OVPN_COUNTER_ADD(sc, transport_bytes_received, m->m_pkthdr.len - off); ohdrlen = sizeof(*ohdr) - sizeof(ohdr->auth_tag); OVPN_RLOCK(sc); peer = ovpn_peer_from_mbuf(sc, m, off); if (peer == NULL) { OVPN_RUNLOCK(sc); return (false); } if (m_length(m, NULL) < (off + sizeof(*uhdr) + ohdrlen)) { /* Short packet. */ OVPN_RUNLOCK(sc); return (false); } m_copydata(m, off + sizeof(*uhdr), ohdrlen, (caddr_t)&tmphdr); op = ntohl(tmphdr.opcode) >> 24 >> OVPN_OP_SHIFT; if (op != OVPN_OP_DATA_V2) { /* Control packet? */ OVPN_RUNLOCK(sc); return (false); } m = m_pullup(m, off + sizeof(*uhdr) + ohdrlen); if (m == NULL) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1); return (true); } /* * Simplify things by getting rid of the preceding headers, we don't * care about them. */ m_adj_decap(m, off); uhdr = mtodo(m, 0); ohdr = mtodo(m, sizeof(*uhdr)); key = ovpn_find_key(sc, peer, ohdr); if (key == NULL || key->decrypt == NULL) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1); m_freem(m); return (true); } if (key->decrypt->cipher == OVPN_CIPHER_ALG_NONE) { /* Now remove the outer headers */ m_adj_decap(m, sizeof(struct udphdr) + ohdrlen); ohdr = mtodo(m, sizeof(*uhdr)); ovpn_finish_rx(sc, m, peer, key, ntohl(ohdr->seq), _ovpn_lock_trackerp); OVPN_UNLOCK_ASSERT(sc); return (true); } ohdrlen += sizeof(ohdr->auth_tag); m = m_pullup(m, sizeof(*uhdr) + ohdrlen); if (m == NULL) { OVPN_RUNLOCK(sc); OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1); return (true); } uhdr = mtodo(m, 0); ohdr = mtodo(m, sizeof(*uhdr)); /* Decrypt */ crp = crypto_getreq(key->decrypt->cryptoid, M_NOWAIT); if (crp == NULL) { OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1); OVPN_RUNLOCK(sc); m_freem(m); return (true); } crp->crp_payload_start = sizeof(struct udphdr) + sizeof(*ohdr); crp->crp_payload_length = ntohs(uhdr->uh_ulen) - sizeof(*uhdr) - sizeof(*ohdr); crp->crp_op = CRYPTO_OP_DECRYPT; /* AAD validation. 
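 * The AAD is the wire header minus the 16-byte auth tag, i.e. the opcode
 * and sequence number; the tag itself is checked via
 * CRYPTO_OP_VERIFY_DIGEST and the nonce is the 4-byte sequence number
 * followed by the key's 8-byte implicit IV, mirroring the transmit path.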
*/ crp->crp_aad_length = sizeof(*ohdr) - sizeof(ohdr->auth_tag); crp->crp_aad = ohdr; crp->crp_aad_start = 0; crp->crp_op |= CRYPTO_OP_VERIFY_DIGEST; crp->crp_digest_start = sizeof(struct udphdr) + offsetof(struct ovpn_wire_header, auth_tag); crp->crp_flags |= CRYPTO_F_IV_SEPARATE; memcpy(crp->crp_iv, &ohdr->seq, sizeof(ohdr->seq)); memcpy(crp->crp_iv + sizeof(ohdr->seq), key->decrypt->nonce, key->decrypt->noncelen); crypto_use_mbuf(crp, m); crp->crp_flags |= CRYPTO_F_CBIFSYNC; crp->crp_callback = ovpn_decrypt_rx_cb; crp->crp_opaque = sc; atomic_add_int(&sc->refcount, 1); OVPN_RUNLOCK(sc); if (V_async_crypto) ret = crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED); else ret = crypto_dispatch(crp); if (ret != 0) { OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1); } return (true); } static void ovpn_qflush(struct ifnet *ifp __unused) { } static void ovpn_flush_rxring(struct ovpn_softc *sc) { struct ovpn_notification *n; OVPN_WASSERT(sc); while (! buf_ring_empty(sc->notifring)) { n = buf_ring_dequeue_sc(sc->notifring); free(n, M_OVPN); } } #ifdef VIMAGE static void ovpn_reassign(struct ifnet *ifp, struct vnet *new_vnet __unused, char *unused __unused) { struct ovpn_softc *sc = ifp->if_softc; struct ovpn_kpeer *peer, *tmppeer; int ret __diagused; OVPN_WLOCK(sc); /* Flush keys & configuration. */ RB_FOREACH_SAFE(peer, ovpn_kpeers, &sc->peers, tmppeer) { peer->del_reason = OVPN_DEL_REASON_REQUESTED; ret = _ovpn_del_peer(sc, peer); MPASS(ret == 0); } ovpn_flush_rxring(sc); OVPN_WUNLOCK(sc); } #endif static int ovpn_clone_match(struct if_clone *ifc, const char *name) { /* * Allow all names that start with 'ovpn', specifically because pfSense * uses ovpnc1 / ovpns2 */ return (strncmp(ovpnname, name, strlen(ovpnname)) == 0); } static int ovpn_clone_create(struct if_clone *ifc, char *name, size_t len, struct ifc_data *ifd, struct ifnet **ifpp) { struct ovpn_softc *sc; struct ifnet *ifp; char *dp; int error, unit, wildcard; /* Try to see if a special unit was requested. */ error = ifc_name2unit(name, &unit); if (error != 0) return (error); wildcard = (unit < 0); error = ifc_alloc_unit(ifc, &unit); if (error != 0) return (error); /* * If no unit had been given, we need to adjust the ifName. */ for (dp = name; *dp != '\0'; dp++); if (wildcard) { error = snprintf(dp, len - (dp - name), "%d", unit); if (error > len - (dp - name)) { /* ifName too long. */ ifc_free_unit(ifc, unit); return (ENOSPC); } dp += error; } /* Make sure it doesn't already exist. 
*/ if (ifunit(name) != NULL) return (EEXIST); sc = malloc(sizeof(struct ovpn_softc), M_OVPN, M_WAITOK | M_ZERO); sc->ifp = if_alloc(IFT_ENC); rm_init_flags(&sc->lock, "if_ovpn_lock", RM_RECURSE); sc->refcount = 0; sc->notifring = buf_ring_alloc(32, M_OVPN, M_WAITOK, NULL); COUNTER_ARRAY_ALLOC(sc->counters, OVPN_COUNTER_SIZE, M_WAITOK); ifp = sc->ifp; ifp->if_softc = sc; strlcpy(ifp->if_xname, name, IFNAMSIZ); ifp->if_dname = ovpngroupname; ifp->if_dunit = unit; ifp->if_addrlen = 0; ifp->if_mtu = 1428; ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; ifp->if_ioctl = ovpn_ioctl; ifp->if_transmit = ovpn_transmit; ifp->if_output = ovpn_output; ifp->if_qflush = ovpn_qflush; #ifdef VIMAGE ifp->if_reassign = ovpn_reassign; #endif ifp->if_capabilities |= IFCAP_LINKSTATE; ifp->if_capenable |= IFCAP_LINKSTATE; if_attach(ifp); bpfattach(ifp, DLT_NULL, sizeof(uint32_t)); *ifpp = ifp; return (0); } static void ovpn_clone_destroy_cb(struct epoch_context *ctx) { struct ovpn_softc *sc; sc = __containerof(ctx, struct ovpn_softc, epoch_ctx); MPASS(sc->peercount == 0); MPASS(RB_EMPTY(&sc->peers)); COUNTER_ARRAY_FREE(sc->counters, OVPN_COUNTER_SIZE); if_free(sc->ifp); free(sc, M_OVPN); } static int ovpn_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags) { struct ovpn_softc *sc; struct ovpn_kpeer *peer, *tmppeer; int unit; int ret __diagused; sc = ifp->if_softc; unit = ifp->if_dunit; OVPN_WLOCK(sc); if (atomic_load_int(&sc->refcount) > 0) { OVPN_WUNLOCK(sc); return (EBUSY); } RB_FOREACH_SAFE(peer, ovpn_kpeers, &sc->peers, tmppeer) { peer->del_reason = OVPN_DEL_REASON_REQUESTED; ret = _ovpn_del_peer(sc, peer); MPASS(ret == 0); } ovpn_flush_rxring(sc); buf_ring_free(sc->notifring, M_OVPN); OVPN_WUNLOCK(sc); bpfdetach(ifp); if_detach(ifp); ifp->if_softc = NULL; NET_EPOCH_CALL(ovpn_clone_destroy_cb, &sc->epoch_ctx); if (unit != IF_DUNIT_NONE) ifc_free_unit(ifc, unit); NET_EPOCH_DRAIN_CALLBACKS(); return (0); } static void vnet_ovpn_init(const void *unused __unused) { struct if_clone_addreq req = { .match_f = ovpn_clone_match, .create_f = ovpn_clone_create, .destroy_f = ovpn_clone_destroy, }; V_ovpn_cloner = ifc_attach_cloner(ovpngroupname, &req); } VNET_SYSINIT(vnet_ovpn_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_ovpn_init, NULL); static void vnet_ovpn_uninit(const void *unused __unused) { if_clone_detach(V_ovpn_cloner); } VNET_SYSUNINIT(vnet_ovpn_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_ovpn_uninit, NULL); static int ovpnmodevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: /* Done in vnet_ovpn_init() */ break; case MOD_UNLOAD: /* Done in vnet_ovpn_uninit() */ break; default: return (EOPNOTSUPP); } return (0); } static moduledata_t ovpn_mod = { "if_ovpn", ovpnmodevent, 0 }; DECLARE_MODULE(if_ovpn, ovpn_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(if_ovpn, 1); diff --git a/sys/netgraph/bluetooth/include/ng_btsocket_hci_raw.h b/sys/netgraph/bluetooth/include/ng_btsocket_hci_raw.h index 05c109df1570..b55c95b29ee3 100644 --- a/sys/netgraph/bluetooth/include/ng_btsocket_hci_raw.h +++ b/sys/netgraph/bluetooth/include/ng_btsocket_hci_raw.h @@ -1,89 +1,88 @@ /* * ng_btsocket_hci_raw.h */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_btsocket_hci_raw.h,v 1.3 2003/03/25 23:53:32 max Exp $ */ #ifndef _NETGRAPH_BTSOCKET_HCI_RAW_H_ #define _NETGRAPH_BTSOCKET_HCI_RAW_H_ #define NG_BTSOCKET_HCI_RAW_SENDSPACE (4 * 1024) #define NG_BTSOCKET_HCI_RAW_RECVSPACE (4 * 1024) /* * Bluetooth raw HCI socket PCB */ struct ng_btsocket_hci_raw_pcb { struct socket *so; /* socket */ u_int32_t flags; /* flags */ #define NG_BTSOCKET_HCI_RAW_DIRECTION (1 << 0) #define NG_BTSOCKET_HCI_RAW_PRIVILEGED (1 << 1) struct sockaddr_hci addr; /* local address */ struct ng_btsocket_hci_raw_filter filter; /* filter */ u_int32_t token; /* message token */ struct ng_mesg *msg; /* message */ LIST_ENTRY(ng_btsocket_hci_raw_pcb) next; /* link to next */ struct mtx pcb_mtx; /* pcb mutex */ }; typedef struct ng_btsocket_hci_raw_pcb ng_btsocket_hci_raw_pcb_t; typedef struct ng_btsocket_hci_raw_pcb * ng_btsocket_hci_raw_pcb_p; #define so2hci_raw_pcb(so) \ ((struct ng_btsocket_hci_raw_pcb *)((so)->so_pcb)) /* * Bluetooth raw HCI socket methods */ #ifdef _KERNEL void ng_btsocket_hci_raw_abort (struct socket *); void ng_btsocket_hci_raw_close (struct socket *); int ng_btsocket_hci_raw_attach (struct socket *, int, struct thread *); int ng_btsocket_hci_raw_bind (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_hci_raw_connect (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_hci_raw_control (struct socket *, u_long, void *, struct ifnet *, struct thread *); int ng_btsocket_hci_raw_ctloutput (struct socket *, struct sockopt *); void ng_btsocket_hci_raw_detach (struct socket *); int ng_btsocket_hci_raw_disconnect (struct socket *); -int ng_btsocket_hci_raw_peeraddr (struct socket *, struct sockaddr **); int ng_btsocket_hci_raw_send (struct socket *, int, struct mbuf *, struct sockaddr *, struct mbuf *, struct thread *); -int ng_btsocket_hci_raw_sockaddr (struct socket *, struct sockaddr **); +int ng_btsocket_hci_raw_sockaddr (struct socket *, struct sockaddr *); #endif /* _KERNEL */ #endif /* ndef _NETGRAPH_BTSOCKET_HCI_RAW_H_ */ diff --git a/sys/netgraph/bluetooth/include/ng_btsocket_l2cap.h b/sys/netgraph/bluetooth/include/ng_btsocket_l2cap.h index b512112f8b7d..7181e369119d 100644 --- a/sys/netgraph/bluetooth/include/ng_btsocket_l2cap.h +++ b/sys/netgraph/bluetooth/include/ng_btsocket_l2cap.h @@ -1,214 +1,213 @@ /* * ng_btsocket_l2cap.h */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_btsocket_l2cap.h,v 1.4 2003/03/25 23:53:33 max Exp $ */ #ifndef _NETGRAPH_BTSOCKET_L2CAP_H_ #define _NETGRAPH_BTSOCKET_L2CAP_H_ /* * L2CAP routing entry */ struct ng_hook; struct ng_message; struct ng_btsocket_l2cap_rtentry { bdaddr_t src; /* source BD_ADDR */ struct ng_hook *hook; /* downstream hook */ LIST_ENTRY(ng_btsocket_l2cap_rtentry) next; /* link to next */ }; typedef struct ng_btsocket_l2cap_rtentry ng_btsocket_l2cap_rtentry_t; typedef struct ng_btsocket_l2cap_rtentry * ng_btsocket_l2cap_rtentry_p; /***************************************************************************** ***************************************************************************** ** SOCK_RAW L2CAP sockets ** ***************************************************************************** *****************************************************************************/ #define NG_BTSOCKET_L2CAP_RAW_SENDSPACE NG_L2CAP_MTU_DEFAULT #define NG_BTSOCKET_L2CAP_RAW_RECVSPACE NG_L2CAP_MTU_DEFAULT /* * Bluetooth raw L2CAP socket PCB */ struct ng_btsocket_l2cap_raw_pcb { struct socket *so; /* socket */ u_int32_t flags; /* flags */ #define NG_BTSOCKET_L2CAP_RAW_PRIVILEGED (1 << 0) bdaddr_t src; /* source address */ bdaddr_t dst; /* dest address */ uint8_t srctype;/*source addr type*/ uint8_t dsttype;/*source addr type*/ ng_btsocket_l2cap_rtentry_p rt; /* routing info */ u_int32_t token; /* message token */ struct ng_mesg *msg; /* message */ struct mtx pcb_mtx; /* pcb mutex */ LIST_ENTRY(ng_btsocket_l2cap_raw_pcb) next; /* link to next PCB */ }; typedef struct ng_btsocket_l2cap_raw_pcb ng_btsocket_l2cap_raw_pcb_t; typedef struct ng_btsocket_l2cap_raw_pcb * ng_btsocket_l2cap_raw_pcb_p; #define so2l2cap_raw_pcb(so) \ ((struct ng_btsocket_l2cap_raw_pcb *)((so)->so_pcb)) /* * Bluetooth raw L2CAP socket methods */ #ifdef _KERNEL void ng_btsocket_l2cap_raw_abort (struct socket *); void ng_btsocket_l2cap_raw_close (struct socket *); int ng_btsocket_l2cap_raw_attach (struct socket *, int, struct thread *); int ng_btsocket_l2cap_raw_bind (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_l2cap_raw_connect (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_l2cap_raw_control (struct socket *, u_long, void *, struct ifnet *, struct thread *); void 
ng_btsocket_l2cap_raw_detach (struct socket *); int ng_btsocket_l2cap_raw_disconnect (struct socket *); -int ng_btsocket_l2cap_raw_peeraddr (struct socket *, struct sockaddr **); +int ng_btsocket_l2cap_raw_peeraddr (struct socket *, struct sockaddr *); int ng_btsocket_l2cap_raw_send (struct socket *, int, struct mbuf *, struct sockaddr *, struct mbuf *, struct thread *); -int ng_btsocket_l2cap_raw_sockaddr (struct socket *, struct sockaddr **); +int ng_btsocket_l2cap_raw_sockaddr (struct socket *, struct sockaddr *); #endif /* _KERNEL */ /***************************************************************************** ***************************************************************************** ** SOCK_SEQPACKET L2CAP sockets ** ***************************************************************************** *****************************************************************************/ #define NG_BTSOCKET_L2CAP_SENDSPACE NG_L2CAP_MTU_DEFAULT /* (64 * 1024) */ #define NG_BTSOCKET_L2CAP_RECVSPACE (64 * 1024) /* * Bluetooth L2CAP socket PCB */ struct ng_btsocket_l2cap_pcb { struct socket *so; /* Pointer to socket */ bdaddr_t src; /* Source address */ bdaddr_t dst; /* Destination address */ uint8_t srctype; /*source addr type*/ uint8_t dsttype; /*source addr type*/ u_int16_t psm; /* PSM */ u_int16_t cid; /* Local channel ID */ uint8_t idtype; u_int16_t flags; /* socket flags */ #define NG_BTSOCKET_L2CAP_CLIENT (1 << 0) /* socket is client */ #define NG_BTSOCKET_L2CAP_TIMO (1 << 1) /* timeout pending */ u_int8_t state; /* socket state */ #define NG_BTSOCKET_L2CAP_CLOSED 0 /* socket closed */ #define NG_BTSOCKET_L2CAP_CONNECTING 1 /* wait for connect */ #define NG_BTSOCKET_L2CAP_CONFIGURING 2 /* wait for config */ #define NG_BTSOCKET_L2CAP_OPEN 3 /* socket open */ #define NG_BTSOCKET_L2CAP_DISCONNECTING 4 /* wait for disconnect */ #define NG_BTSOCKET_L2CAP_W4_ENC_CHANGE 5 u_int8_t cfg_state; /* config state */ #define NG_BTSOCKET_L2CAP_CFG_IN (1 << 0) /* incoming path done */ #define NG_BTSOCKET_L2CAP_CFG_OUT (1 << 1) /* outgoing path done */ #define NG_BTSOCKET_L2CAP_CFG_BOTH \ (NG_BTSOCKET_L2CAP_CFG_IN | NG_BTSOCKET_L2CAP_CFG_OUT) #define NG_BTSOCKET_L2CAP_CFG_IN_SENT (1 << 2) /* L2CAP ConfigReq sent */ #define NG_BTSOCKET_L2CAP_CFG_OUT_SENT (1 << 3) /* ---/--- */ uint8_t encryption; u_int16_t imtu; /* Incoming MTU */ ng_l2cap_flow_t iflow; /* Input flow spec */ u_int16_t omtu; /* Outgoing MTU */ ng_l2cap_flow_t oflow; /* Outgoing flow spec */ u_int16_t flush_timo; /* flush timeout */ u_int16_t link_timo; /* link timeout */ struct callout timo; /* timeout */ u_int32_t token; /* message token */ ng_btsocket_l2cap_rtentry_p rt; /* routing info */ struct mtx pcb_mtx; /* pcb mutex */ uint16_t need_encrypt; /*encryption needed*/ LIST_ENTRY(ng_btsocket_l2cap_pcb) next; /* link to next PCB */ }; typedef struct ng_btsocket_l2cap_pcb ng_btsocket_l2cap_pcb_t; typedef struct ng_btsocket_l2cap_pcb * ng_btsocket_l2cap_pcb_p; #define so2l2cap_pcb(so) \ ((struct ng_btsocket_l2cap_pcb *)((so)->so_pcb)) /* * Bluetooth L2CAP socket methods */ #ifdef _KERNEL void ng_btsocket_l2cap_abort (struct socket *); void ng_btsocket_l2cap_close (struct socket *); -int ng_btsocket_l2cap_accept (struct socket *, struct sockaddr *); int ng_btsocket_l2cap_attach (struct socket *, int, struct thread *); int ng_btsocket_l2cap_bind (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_l2cap_connect (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_l2cap_control (struct socket *, u_long, void *, 
struct ifnet *, struct thread *); int ng_btsocket_l2cap_ctloutput (struct socket *, struct sockopt *); void ng_btsocket_l2cap_detach (struct socket *); int ng_btsocket_l2cap_disconnect (struct socket *); int ng_btsocket_l2cap_listen (struct socket *, int, struct thread *); -int ng_btsocket_l2cap_peeraddr (struct socket *, struct sockaddr **); +int ng_btsocket_l2cap_peeraddr (struct socket *, struct sockaddr *); int ng_btsocket_l2cap_send (struct socket *, int, struct mbuf *, struct sockaddr *, struct mbuf *, struct thread *); -int ng_btsocket_l2cap_sockaddr (struct socket *, struct sockaddr **); +int ng_btsocket_l2cap_sockaddr (struct socket *, struct sockaddr *); #endif /* _KERNEL */ #endif /* _NETGRAPH_BTSOCKET_L2CAP_H_ */ diff --git a/sys/netgraph/bluetooth/include/ng_btsocket_rfcomm.h b/sys/netgraph/bluetooth/include/ng_btsocket_rfcomm.h index d40b694ece14..a5448ed943a2 100644 --- a/sys/netgraph/bluetooth/include/ng_btsocket_rfcomm.h +++ b/sys/netgraph/bluetooth/include/ng_btsocket_rfcomm.h @@ -1,339 +1,339 @@ /* * ng_btsocket_rfcomm.h */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2003 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: ng_btsocket_rfcomm.h,v 1.10 2003/03/29 22:27:42 max Exp $ */ #ifndef _NETGRAPH_BTSOCKET_RFCOMM_H_ #define _NETGRAPH_BTSOCKET_RFCOMM_H_ /***************************************************************************** ***************************************************************************** ** RFCOMM ** ***************************************************************************** *****************************************************************************/ /* XXX FIXME this does not belong here */ #define RFCOMM_DEFAULT_MTU 667 #define RFCOMM_MAX_MTU 1024 #define RFCOMM_DEFAULT_CREDITS 7 #define RFCOMM_MAX_CREDITS 40 /* RFCOMM frame types */ #define RFCOMM_FRAME_SABM 0x2f #define RFCOMM_FRAME_DISC 0x43 #define RFCOMM_FRAME_UA 0x63 #define RFCOMM_FRAME_DM 0x0f #define RFCOMM_FRAME_UIH 0xef /* RFCOMM MCC commands */ #define RFCOMM_MCC_TEST 0x08 /* Test */ #define RFCOMM_MCC_FCON 0x28 /* Flow Control on */ #define RFCOMM_MCC_FCOFF 0x18 /* Flow Control off */ #define RFCOMM_MCC_MSC 0x38 /* Modem Status Command */ #define RFCOMM_MCC_RPN 0x24 /* Remote Port Negotiation */ #define RFCOMM_MCC_RLS 0x14 /* Remote Line Status */ #define RFCOMM_MCC_PN 0x20 /* Port Negotiation */ #define RFCOMM_MCC_NSC 0x04 /* Non Supported Command */ /* RFCOMM modem signals */ #define RFCOMM_MODEM_FC 0x02 /* Flow Control asserted */ #define RFCOMM_MODEM_RTC 0x04 /* Ready To Communicate */ #define RFCOMM_MODEM_RTR 0x08 /* Ready To Receive */ #define RFCOMM_MODEM_IC 0x40 /* Incoming Call */ #define RFCOMM_MODEM_DV 0x80 /* Data Valid */ /* RPN parameters - baud rate */ #define RFCOMM_RPN_BR_2400 0x0 #define RFCOMM_RPN_BR_4800 0x1 #define RFCOMM_RPN_BR_7200 0x2 #define RFCOMM_RPN_BR_9600 0x3 #define RFCOMM_RPN_BR_19200 0x4 #define RFCOMM_RPN_BR_38400 0x5 #define RFCOMM_RPN_BR_57600 0x6 #define RFCOMM_RPN_BR_115200 0x7 #define RFCOMM_RPN_BR_230400 0x8 /* RPN parameters - data bits */ #define RFCOMM_RPN_DATA_5 0x0 #define RFCOMM_RPN_DATA_6 0x2 #define RFCOMM_RPN_DATA_7 0x1 #define RFCOMM_RPN_DATA_8 0x3 /* RPN parameters - stop bit */ #define RFCOMM_RPN_STOP_1 0 #define RFCOMM_RPN_STOP_15 1 /* RPN parameters - parity */ #define RFCOMM_RPN_PARITY_NONE 0x0 #define RFCOMM_RPN_PARITY_ODD 0x4 #define RFCOMM_RPN_PARITY_EVEN 0x5 #define RFCOMM_RPN_PARITY_MARK 0x6 #define RFCOMM_RPN_PARITY_SPACE 0x7 /* RPN parameters - flow control */ #define RFCOMM_RPN_FLOW_NONE 0x00 #define RFCOMM_RPN_XON_CHAR 0x11 #define RFCOMM_RPN_XOFF_CHAR 0x13 /* RPN parameters - mask */ #define RFCOMM_RPN_PM_BITRATE 0x0001 #define RFCOMM_RPN_PM_DATA 0x0002 #define RFCOMM_RPN_PM_STOP 0x0004 #define RFCOMM_RPN_PM_PARITY 0x0008 #define RFCOMM_RPN_PM_PARITY_TYPE 0x0010 #define RFCOMM_RPN_PM_XON 0x0020 #define RFCOMM_RPN_PM_XOFF 0x0040 #define RFCOMM_RPN_PM_FLOW 0x3F00 #define RFCOMM_RPN_PM_ALL 0x3F7F /* RFCOMM frame header */ struct rfcomm_frame_hdr { u_int8_t address; u_int8_t control; u_int8_t length; /* Actual size could be 2 bytes */ } __attribute__ ((packed)); /* RFCOMM command frame header */ struct rfcomm_cmd_hdr { u_int8_t address; u_int8_t control; u_int8_t length; u_int8_t fcs; } __attribute__ ((packed)); /* RFCOMM MCC command header */ struct rfcomm_mcc_hdr { u_int8_t type; u_int8_t length; /* XXX FIXME Can actual size be 2 bytes?? 
*/ } __attribute__ ((packed)); /* RFCOMM MSC command */ struct rfcomm_mcc_msc { u_int8_t address; u_int8_t modem; } __attribute__ ((packed)); /* RFCOMM RPN command */ struct rfcomm_mcc_rpn { u_int8_t dlci; u_int8_t bit_rate; u_int8_t line_settings; u_int8_t flow_control; u_int8_t xon_char; u_int8_t xoff_char; u_int16_t param_mask; } __attribute__ ((packed)); /* RFCOMM RLS command */ struct rfcomm_mcc_rls { u_int8_t address; u_int8_t status; } __attribute__ ((packed)); /* RFCOMM PN command */ struct rfcomm_mcc_pn { u_int8_t dlci; u_int8_t flow_control; u_int8_t priority; u_int8_t ack_timer; u_int16_t mtu; u_int8_t max_retrans; u_int8_t credits; } __attribute__ ((packed)); /* RFCOMM frame parsing macros */ #define RFCOMM_DLCI(b) (((b) & 0xfc) >> 2) #define RFCOMM_CHANNEL(b) (((b) & 0xf8) >> 3) #define RFCOMM_DIRECTION(b) (((b) & 0x04) >> 2) #define RFCOMM_TYPE(b) (((b) & 0xef)) #define RFCOMM_EA(b) (((b) & 0x01)) #define RFCOMM_CR(b) (((b) & 0x02) >> 1) #define RFCOMM_PF(b) (((b) & 0x10) >> 4) #define RFCOMM_SRVCHANNEL(dlci) ((dlci) >> 1) #define RFCOMM_MKADDRESS(cr, dlci) \ ((((dlci) & 0x3f) << 2) | ((cr) << 1) | 0x01) #define RFCOMM_MKCONTROL(type, pf) ((((type) & 0xef) | ((pf) << 4))) #define RFCOMM_MKDLCI(dir, channel) ((((channel) & 0x1f) << 1) | (dir)) #define RFCOMM_MKLEN8(len) (((len) << 1) | 1) #define RFCOMM_MKLEN16(len) ((len) << 1) /* RFCOMM MCC macros */ #define RFCOMM_MCC_TYPE(b) (((b) & 0xfc) >> 2) #define RFCOMM_MCC_LENGTH(b) (((b) & 0xfe) >> 1) #define RFCOMM_MKMCC_TYPE(cr, type) ((((type) << 2) | ((cr) << 1) | 0x01)) /* RPN macros */ #define RFCOMM_RPN_DATA_BITS(line) ((line) & 0x3) #define RFCOMM_RPN_STOP_BITS(line) (((line) >> 2) & 0x1) #define RFCOMM_RPN_PARITY(line) (((line) >> 3) & 0x3) #define RFCOMM_MKRPN_LINE_SETTINGS(data, stop, parity) \ (((data) & 0x3) | (((stop) & 0x1) << 2) | (((parity) & 0x3) << 3)) /***************************************************************************** ***************************************************************************** ** SOCK_STREAM RFCOMM sockets ** ***************************************************************************** *****************************************************************************/ #define NG_BTSOCKET_RFCOMM_SENDSPACE \ (RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 2) #define NG_BTSOCKET_RFCOMM_RECVSPACE \ (RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 2) /* * Bluetooth RFCOMM session. One L2CAP connection == one RFCOMM session */ struct ng_btsocket_rfcomm_pcb; struct ng_btsocket_rfcomm_session; struct ng_btsocket_rfcomm_session { struct socket *l2so; /* L2CAP socket */ u_int16_t state; /* session state */ #define NG_BTSOCKET_RFCOMM_SESSION_CLOSED 0 #define NG_BTSOCKET_RFCOMM_SESSION_LISTENING 1 #define NG_BTSOCKET_RFCOMM_SESSION_CONNECTING 2 #define NG_BTSOCKET_RFCOMM_SESSION_CONNECTED 3 #define NG_BTSOCKET_RFCOMM_SESSION_OPEN 4 #define NG_BTSOCKET_RFCOMM_SESSION_DISCONNECTING 5 u_int16_t flags; /* session flags */ #define NG_BTSOCKET_RFCOMM_SESSION_INITIATOR (1 << 0) /* initiator */ #define NG_BTSOCKET_RFCOMM_SESSION_LFC (1 << 1) /* local flow */ #define NG_BTSOCKET_RFCOMM_SESSION_RFC (1 << 2) /* remote flow */ #define INITIATOR(s) \ (((s)->flags & NG_BTSOCKET_RFCOMM_SESSION_INITIATOR)? 
1 : 0) u_int16_t mtu; /* default MTU */ struct ng_bt_mbufq outq; /* outgoing queue */ struct mtx session_mtx; /* session lock */ LIST_HEAD(, ng_btsocket_rfcomm_pcb) dlcs; /* active DLC */ LIST_ENTRY(ng_btsocket_rfcomm_session) next; /* link to next */ }; typedef struct ng_btsocket_rfcomm_session ng_btsocket_rfcomm_session_t; typedef struct ng_btsocket_rfcomm_session * ng_btsocket_rfcomm_session_p; /* * Bluetooth RFCOMM socket PCB (DLC) */ struct ng_btsocket_rfcomm_pcb { struct socket *so; /* RFCOMM socket */ struct ng_btsocket_rfcomm_session *session; /* RFCOMM session */ u_int16_t flags; /* DLC flags */ #define NG_BTSOCKET_RFCOMM_DLC_TIMO (1 << 0) /* timeout pending */ #define NG_BTSOCKET_RFCOMM_DLC_CFC (1 << 1) /* credit flow ctrl */ #define NG_BTSOCKET_RFCOMM_DLC_TIMEDOUT (1 << 2) /* timeout happened */ #define NG_BTSOCKET_RFCOMM_DLC_DETACHED (1 << 3) /* DLC detached */ #define NG_BTSOCKET_RFCOMM_DLC_SENDING (1 << 4) /* send pending */ u_int16_t state; /* DLC state */ #define NG_BTSOCKET_RFCOMM_DLC_CLOSED 0 #define NG_BTSOCKET_RFCOMM_DLC_W4_CONNECT 1 #define NG_BTSOCKET_RFCOMM_DLC_CONFIGURING 2 #define NG_BTSOCKET_RFCOMM_DLC_CONNECTING 3 #define NG_BTSOCKET_RFCOMM_DLC_CONNECTED 4 #define NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING 5 bdaddr_t src; /* source address */ bdaddr_t dst; /* dest. address */ u_int8_t channel; /* RFCOMM channel */ u_int8_t dlci; /* RFCOMM DLCI */ u_int8_t lmodem; /* local mdm signls */ u_int8_t rmodem; /* remote -/- */ u_int16_t mtu; /* MTU */ int16_t rx_cred; /* RX credits */ int16_t tx_cred; /* TX credits */ struct mtx pcb_mtx; /* PCB lock */ struct callout timo; /* timeout */ LIST_ENTRY(ng_btsocket_rfcomm_pcb) session_next;/* link to next */ LIST_ENTRY(ng_btsocket_rfcomm_pcb) next; /* link to next */ }; typedef struct ng_btsocket_rfcomm_pcb ng_btsocket_rfcomm_pcb_t; typedef struct ng_btsocket_rfcomm_pcb * ng_btsocket_rfcomm_pcb_p; #define so2rfcomm_pcb(so) \ ((struct ng_btsocket_rfcomm_pcb *)((so)->so_pcb)) /* * Bluetooth RFCOMM socket methods */ #ifdef _KERNEL void ng_btsocket_rfcomm_abort (struct socket *); void ng_btsocket_rfcomm_close (struct socket *); int ng_btsocket_rfcomm_accept (struct socket *, struct sockaddr *); int ng_btsocket_rfcomm_attach (struct socket *, int, struct thread *); int ng_btsocket_rfcomm_bind (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_rfcomm_connect (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_rfcomm_control (struct socket *, u_long, void *, struct ifnet *, struct thread *); int ng_btsocket_rfcomm_ctloutput (struct socket *, struct sockopt *); void ng_btsocket_rfcomm_detach (struct socket *); int ng_btsocket_rfcomm_disconnect (struct socket *); int ng_btsocket_rfcomm_listen (struct socket *, int, struct thread *); -int ng_btsocket_rfcomm_peeraddr (struct socket *, struct sockaddr **); +int ng_btsocket_rfcomm_peeraddr (struct socket *, struct sockaddr *); int ng_btsocket_rfcomm_send (struct socket *, int, struct mbuf *, struct sockaddr *, struct mbuf *, struct thread *); -int ng_btsocket_rfcomm_sockaddr (struct socket *, struct sockaddr **); +int ng_btsocket_rfcomm_sockaddr (struct socket *, struct sockaddr *); #endif /* _KERNEL */ #endif /* _NETGRAPH_BTSOCKET_RFCOMM_H_ */ diff --git a/sys/netgraph/bluetooth/include/ng_btsocket_sco.h b/sys/netgraph/bluetooth/include/ng_btsocket_sco.h index 282980cce881..4b131dab4223 100644 --- a/sys/netgraph/bluetooth/include/ng_btsocket_sco.h +++ b/sys/netgraph/bluetooth/include/ng_btsocket_sco.h @@ -1,129 +1,129 @@ /* * ng_btsocket_sco.h */ /*- * 
SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_btsocket_sco.h,v 1.3 2005/10/31 18:08:52 max Exp $ */ #ifndef _NETGRAPH_BTSOCKET_SCO_H_ #define _NETGRAPH_BTSOCKET_SCO_H_ /* * SCO routing entry */ struct ng_hook; struct ng_message; struct ng_btsocket_sco_rtentry { bdaddr_t src; /* source BD_ADDR */ u_int16_t pkt_size; /* mtu */ u_int16_t num_pkts; /* buffer size */ int32_t pending; /* pending packets */ struct ng_hook *hook; /* downstream hook */ LIST_ENTRY(ng_btsocket_sco_rtentry) next; /* link to next */ }; typedef struct ng_btsocket_sco_rtentry ng_btsocket_sco_rtentry_t; typedef struct ng_btsocket_sco_rtentry * ng_btsocket_sco_rtentry_p; /***************************************************************************** ***************************************************************************** ** SOCK_SEQPACKET SCO sockets ** ***************************************************************************** *****************************************************************************/ #define NG_BTSOCKET_SCO_SENDSPACE 1024 #define NG_BTSOCKET_SCO_RECVSPACE (64 * 1024) /* * Bluetooth SCO socket PCB */ struct ng_btsocket_sco_pcb { struct socket *so; /* Pointer to socket */ bdaddr_t src; /* Source address */ bdaddr_t dst; /* Destination address */ u_int16_t con_handle; /* connection handle */ u_int16_t flags; /* socket flags */ #define NG_BTSOCKET_SCO_CLIENT (1 << 0) /* socket is client */ #define NG_BTSOCKET_SCO_TIMO (1 << 1) /* timeout pending */ u_int8_t state; /* socket state */ #define NG_BTSOCKET_SCO_CLOSED 0 /* socket closed */ #define NG_BTSOCKET_SCO_CONNECTING 1 /* wait for connect */ #define NG_BTSOCKET_SCO_OPEN 2 /* socket open */ #define NG_BTSOCKET_SCO_DISCONNECTING 3 /* wait for disconnect */ struct callout timo; /* timeout */ ng_btsocket_sco_rtentry_p rt; /* routing info */ struct mtx pcb_mtx; /* pcb mutex */ LIST_ENTRY(ng_btsocket_sco_pcb) next; /* link to next PCB */ }; typedef struct ng_btsocket_sco_pcb ng_btsocket_sco_pcb_t; typedef struct ng_btsocket_sco_pcb * ng_btsocket_sco_pcb_p; #define so2sco_pcb(so) \ ((struct ng_btsocket_sco_pcb *)((so)->so_pcb)) /* * Bluetooth SCO socket methods */ #ifdef _KERNEL void ng_btsocket_sco_abort (struct socket *); void ng_btsocket_sco_close 
(struct socket *); int ng_btsocket_sco_accept (struct socket *, struct sockaddr *); int ng_btsocket_sco_attach (struct socket *, int, struct thread *); int ng_btsocket_sco_bind (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_sco_connect (struct socket *, struct sockaddr *, struct thread *); int ng_btsocket_sco_control (struct socket *, u_long, void *, struct ifnet *, struct thread *); int ng_btsocket_sco_ctloutput (struct socket *, struct sockopt *); void ng_btsocket_sco_detach (struct socket *); int ng_btsocket_sco_disconnect (struct socket *); int ng_btsocket_sco_listen (struct socket *, int, struct thread *); -int ng_btsocket_sco_peeraddr (struct socket *, struct sockaddr **); +int ng_btsocket_sco_peeraddr (struct socket *, struct sockaddr *); int ng_btsocket_sco_send (struct socket *, int, struct mbuf *, struct sockaddr *, struct mbuf *, struct thread *); -int ng_btsocket_sco_sockaddr (struct socket *, struct sockaddr **); +int ng_btsocket_sco_sockaddr (struct socket *, struct sockaddr *); #endif /* _KERNEL */ #endif /* _NETGRAPH_BTSOCKET_SCO_H_ */ diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket.c b/sys/netgraph/bluetooth/socket/ng_btsocket.c index 03fa322e7775..a90256595911 100644 --- a/sys/netgraph/bluetooth/socket/ng_btsocket.c +++ b/sys/netgraph/bluetooth/socket/ng_btsocket.c @@ -1,248 +1,248 @@ /* * ng_btsocket.c */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
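 *
 * In outline: the header hunks above turn the peeraddr()/sockaddr() methods
 * from filling in a callee-allocated "struct sockaddr **" into filling in a
 * caller-provided "struct sockaddr *".  With that convention a separate
 * accept() implementation that merely reports the peer address adds nothing
 * over peeraddr(), so the protosw tables below wire .pr_accept to the
 * peeraddr routines and raw HCI's .pr_peeraddr to its sockaddr routine.
 * Under the new convention such a method is typically just a copy-out,
 * roughly (an illustrative sketch, not code from this patch):
 *
 *	int
 *	example_sockaddr(struct socket *so, struct sockaddr *sa)
 *	{
 *		struct sockaddr_hci *hci = (struct sockaddr_hci *)sa;
 *
 *		memset(hci, 0, sizeof(*hci));
 *		hci->hci_len = sizeof(*hci);
 *		hci->hci_family = AF_BLUETOOTH;
 *		... copy the bound node name / bdaddr out of the pcb ...
 *		return (0);
 *	}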
* * $Id: ng_btsocket.c,v 1.4 2003/09/14 23:29:06 max Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int ng_btsocket_modevent (module_t, int, void *); /* * Definitions of protocols supported in the BLUETOOTH domain */ /* Bluetooth raw HCI sockets */ static struct protosw ng_btsocket_hci_raw_protosw = { .pr_type = SOCK_RAW, .pr_protocol = BLUETOOTH_PROTO_HCI, .pr_flags = PR_ATOMIC|PR_ADDR, .pr_ctloutput = ng_btsocket_hci_raw_ctloutput, .pr_abort = ng_btsocket_hci_raw_abort, .pr_attach = ng_btsocket_hci_raw_attach, .pr_bind = ng_btsocket_hci_raw_bind, .pr_connect = ng_btsocket_hci_raw_connect, .pr_control = ng_btsocket_hci_raw_control, .pr_detach = ng_btsocket_hci_raw_detach, .pr_disconnect = ng_btsocket_hci_raw_disconnect, - .pr_peeraddr = ng_btsocket_hci_raw_peeraddr, + .pr_peeraddr = ng_btsocket_hci_raw_sockaddr, .pr_send = ng_btsocket_hci_raw_send, .pr_sockaddr = ng_btsocket_hci_raw_sockaddr, .pr_close = ng_btsocket_hci_raw_close, }; /* Bluetooth raw L2CAP sockets */ static struct protosw ng_btsocket_l2cap_raw_protosw = { .pr_type = SOCK_RAW, .pr_protocol = BLUETOOTH_PROTO_L2CAP, .pr_flags = PR_ATOMIC|PR_ADDR, .pr_abort = ng_btsocket_l2cap_raw_abort, .pr_attach = ng_btsocket_l2cap_raw_attach, .pr_bind = ng_btsocket_l2cap_raw_bind, .pr_connect = ng_btsocket_l2cap_raw_connect, .pr_control = ng_btsocket_l2cap_raw_control, .pr_detach = ng_btsocket_l2cap_raw_detach, .pr_disconnect = ng_btsocket_l2cap_raw_disconnect, .pr_peeraddr = ng_btsocket_l2cap_raw_peeraddr, .pr_send = ng_btsocket_l2cap_raw_send, .pr_sockaddr = ng_btsocket_l2cap_raw_sockaddr, .pr_close = ng_btsocket_l2cap_raw_close, }; /* Bluetooth SEQPACKET L2CAP sockets */ static struct protosw ng_btsocket_l2cap_protosw = { .pr_type = SOCK_SEQPACKET, .pr_protocol = BLUETOOTH_PROTO_L2CAP, .pr_flags = PR_ATOMIC|PR_CONNREQUIRED, .pr_ctloutput = ng_btsocket_l2cap_ctloutput, .pr_abort = ng_btsocket_l2cap_abort, - .pr_accept = ng_btsocket_l2cap_accept, + .pr_accept = ng_btsocket_l2cap_peeraddr, .pr_attach = ng_btsocket_l2cap_attach, .pr_bind = ng_btsocket_l2cap_bind, .pr_connect = ng_btsocket_l2cap_connect, .pr_control = ng_btsocket_l2cap_control, .pr_detach = ng_btsocket_l2cap_detach, .pr_disconnect = ng_btsocket_l2cap_disconnect, .pr_listen = ng_btsocket_l2cap_listen, .pr_peeraddr = ng_btsocket_l2cap_peeraddr, .pr_send = ng_btsocket_l2cap_send, .pr_sockaddr = ng_btsocket_l2cap_sockaddr, .pr_close = ng_btsocket_l2cap_close, }; /* Bluetooth STREAM RFCOMM sockets */ static struct protosw ng_btsocket_rfcomm_protosw = { .pr_type = SOCK_STREAM, .pr_protocol = BLUETOOTH_PROTO_RFCOMM, .pr_flags = PR_CONNREQUIRED, .pr_ctloutput = ng_btsocket_rfcomm_ctloutput, .pr_abort = ng_btsocket_rfcomm_abort, - .pr_accept = ng_btsocket_rfcomm_accept, + .pr_accept = ng_btsocket_rfcomm_peeraddr, .pr_attach = ng_btsocket_rfcomm_attach, .pr_bind = ng_btsocket_rfcomm_bind, .pr_connect = ng_btsocket_rfcomm_connect, .pr_control = ng_btsocket_rfcomm_control, .pr_detach = ng_btsocket_rfcomm_detach, .pr_disconnect = ng_btsocket_rfcomm_disconnect, .pr_listen = ng_btsocket_rfcomm_listen, .pr_peeraddr = ng_btsocket_rfcomm_peeraddr, .pr_send = ng_btsocket_rfcomm_send, .pr_sockaddr = ng_btsocket_rfcomm_sockaddr, .pr_close = ng_btsocket_rfcomm_close, }; /* Bluetooth SEQPACKET SCO sockets */ static struct protosw ng_btsocket_sco_protosw = { .pr_type = SOCK_SEQPACKET, .pr_protocol = 
BLUETOOTH_PROTO_SCO, .pr_flags = PR_ATOMIC|PR_CONNREQUIRED, .pr_ctloutput = ng_btsocket_sco_ctloutput, .pr_abort = ng_btsocket_sco_abort, - .pr_accept = ng_btsocket_sco_accept, + .pr_accept = ng_btsocket_sco_peeraddr, .pr_attach = ng_btsocket_sco_attach, .pr_bind = ng_btsocket_sco_bind, .pr_connect = ng_btsocket_sco_connect, .pr_control = ng_btsocket_sco_control, .pr_detach = ng_btsocket_sco_detach, .pr_disconnect = ng_btsocket_sco_disconnect, .pr_listen = ng_btsocket_sco_listen, .pr_peeraddr = ng_btsocket_sco_peeraddr, .pr_send = ng_btsocket_sco_send, .pr_sockaddr = ng_btsocket_sco_sockaddr, .pr_close = ng_btsocket_sco_close, }; /* * BLUETOOTH domain */ static struct domain ng_btsocket_domain = { .dom_family = AF_BLUETOOTH, .dom_name = "bluetooth", .dom_nprotosw = 5, .dom_protosw = { &ng_btsocket_hci_raw_protosw, &ng_btsocket_l2cap_raw_protosw, &ng_btsocket_l2cap_protosw, &ng_btsocket_rfcomm_protosw, &ng_btsocket_sco_protosw, }, }; /* * Socket sysctl tree */ SYSCTL_NODE(_net_bluetooth_hci, OID_AUTO, sockets, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Bluetooth HCI sockets family"); SYSCTL_NODE(_net_bluetooth_l2cap, OID_AUTO, sockets, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Bluetooth L2CAP sockets family"); SYSCTL_NODE(_net_bluetooth_rfcomm, OID_AUTO, sockets, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Bluetooth RFCOMM sockets family"); SYSCTL_NODE(_net_bluetooth_sco, OID_AUTO, sockets, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Bluetooth SCO sockets family"); /* * Module */ static moduledata_t ng_btsocket_mod = { "ng_btsocket", ng_btsocket_modevent, NULL }; DECLARE_MODULE(ng_btsocket, ng_btsocket_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); MODULE_VERSION(ng_btsocket, NG_BLUETOOTH_VERSION); MODULE_DEPEND(ng_btsocket, ng_bluetooth, NG_BLUETOOTH_VERSION, NG_BLUETOOTH_VERSION, NG_BLUETOOTH_VERSION); MODULE_DEPEND(ng_btsocket, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); /* * Handle loading and unloading for this node type. * This is to handle auxiliary linkages (e.g protocol domain addition). */ static int ng_btsocket_modevent(module_t mod, int event, void *data) { int error = 0; switch (event) { case MOD_LOAD: break; case MOD_UNLOAD: /* XXX can't unload protocol domain yet */ error = EBUSY; break; default: error = EOPNOTSUPP; break; } return (error); } /* ng_btsocket_modevent */ DOMAIN_SET(ng_btsocket_); diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_hci_raw.c b/sys/netgraph/bluetooth/socket/ng_btsocket_hci_raw.c index 935991696929..5d015b2eac6e 100644 --- a/sys/netgraph/bluetooth/socket/ng_btsocket_hci_raw.c +++ b/sys/netgraph/bluetooth/socket/ng_btsocket_hci_raw.c @@ -1,1680 +1,1669 @@ /* * ng_btsocket_hci_raw.c */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_btsocket_hci_raw.c,v 1.14 2003/09/14 23:29:06 max Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* MALLOC define */ #ifdef NG_SEPARATE_MALLOC static MALLOC_DEFINE(M_NETGRAPH_BTSOCKET_HCI_RAW, "netgraph_btsocks_hci_raw", "Netgraph Bluetooth raw HCI sockets"); #else #define M_NETGRAPH_BTSOCKET_HCI_RAW M_NETGRAPH #endif /* NG_SEPARATE_MALLOC */ /* Netgraph node methods */ static ng_constructor_t ng_btsocket_hci_raw_node_constructor; static ng_rcvmsg_t ng_btsocket_hci_raw_node_rcvmsg; static ng_shutdown_t ng_btsocket_hci_raw_node_shutdown; static ng_newhook_t ng_btsocket_hci_raw_node_newhook; static ng_connect_t ng_btsocket_hci_raw_node_connect; static ng_rcvdata_t ng_btsocket_hci_raw_node_rcvdata; static ng_disconnect_t ng_btsocket_hci_raw_node_disconnect; static void ng_btsocket_hci_raw_input (void *, int); static void ng_btsocket_hci_raw_output(node_p, hook_p, void *, int); static void ng_btsocket_hci_raw_savctl(ng_btsocket_hci_raw_pcb_p, struct mbuf **, struct mbuf *); static int ng_btsocket_hci_raw_filter(ng_btsocket_hci_raw_pcb_p, struct mbuf *, int); #define ng_btsocket_hci_raw_wakeup_input_task() \ taskqueue_enqueue(taskqueue_swi, &ng_btsocket_hci_raw_task) /* Security filter */ struct ng_btsocket_hci_raw_sec_filter { bitstr_t bit_decl(events, 0xff); bitstr_t bit_decl(commands[0x3f], 0x3ff); }; /* Netgraph type descriptor */ static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_BTSOCKET_HCI_RAW_NODE_TYPE, .constructor = ng_btsocket_hci_raw_node_constructor, .rcvmsg = ng_btsocket_hci_raw_node_rcvmsg, .shutdown = ng_btsocket_hci_raw_node_shutdown, .newhook = ng_btsocket_hci_raw_node_newhook, .connect = ng_btsocket_hci_raw_node_connect, .rcvdata = ng_btsocket_hci_raw_node_rcvdata, .disconnect = ng_btsocket_hci_raw_node_disconnect, }; /* Globals */ static u_int32_t ng_btsocket_hci_raw_debug_level; static u_int32_t ng_btsocket_hci_raw_ioctl_timeout; static node_p ng_btsocket_hci_raw_node; static struct ng_bt_itemq ng_btsocket_hci_raw_queue; static struct mtx ng_btsocket_hci_raw_queue_mtx; static struct task ng_btsocket_hci_raw_task; static LIST_HEAD(, ng_btsocket_hci_raw_pcb) ng_btsocket_hci_raw_sockets; static struct mtx ng_btsocket_hci_raw_sockets_mtx; static u_int32_t ng_btsocket_hci_raw_token; static struct mtx ng_btsocket_hci_raw_token_mtx; static struct ng_btsocket_hci_raw_sec_filter *ng_btsocket_hci_raw_sec_filter; static struct timeval ng_btsocket_hci_raw_lasttime; static int ng_btsocket_hci_raw_curpps; /* Sysctl tree */ SYSCTL_DECL(_net_bluetooth_hci_sockets); static SYSCTL_NODE(_net_bluetooth_hci_sockets, OID_AUTO, raw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Bluetooth raw HCI sockets family"); SYSCTL_UINT(_net_bluetooth_hci_sockets_raw, OID_AUTO, debug_level, CTLFLAG_RW, &ng_btsocket_hci_raw_debug_level, NG_BTSOCKET_WARN_LEVEL, 
"Bluetooth raw HCI sockets debug level"); SYSCTL_UINT(_net_bluetooth_hci_sockets_raw, OID_AUTO, ioctl_timeout, CTLFLAG_RW, &ng_btsocket_hci_raw_ioctl_timeout, 5, "Bluetooth raw HCI sockets ioctl timeout"); SYSCTL_UINT(_net_bluetooth_hci_sockets_raw, OID_AUTO, queue_len, CTLFLAG_RD, &ng_btsocket_hci_raw_queue.len, 0, "Bluetooth raw HCI sockets input queue length"); SYSCTL_UINT(_net_bluetooth_hci_sockets_raw, OID_AUTO, queue_maxlen, CTLFLAG_RD, &ng_btsocket_hci_raw_queue.maxlen, 0, "Bluetooth raw HCI sockets input queue max. length"); SYSCTL_UINT(_net_bluetooth_hci_sockets_raw, OID_AUTO, queue_drops, CTLFLAG_RD, &ng_btsocket_hci_raw_queue.drops, 0, "Bluetooth raw HCI sockets input queue drops"); /* Debug */ #define NG_BTSOCKET_HCI_RAW_INFO \ if (ng_btsocket_hci_raw_debug_level >= NG_BTSOCKET_INFO_LEVEL && \ ppsratecheck(&ng_btsocket_hci_raw_lasttime, &ng_btsocket_hci_raw_curpps, 1)) \ printf #define NG_BTSOCKET_HCI_RAW_WARN \ if (ng_btsocket_hci_raw_debug_level >= NG_BTSOCKET_WARN_LEVEL && \ ppsratecheck(&ng_btsocket_hci_raw_lasttime, &ng_btsocket_hci_raw_curpps, 1)) \ printf #define NG_BTSOCKET_HCI_RAW_ERR \ if (ng_btsocket_hci_raw_debug_level >= NG_BTSOCKET_ERR_LEVEL && \ ppsratecheck(&ng_btsocket_hci_raw_lasttime, &ng_btsocket_hci_raw_curpps, 1)) \ printf #define NG_BTSOCKET_HCI_RAW_ALERT \ if (ng_btsocket_hci_raw_debug_level >= NG_BTSOCKET_ALERT_LEVEL && \ ppsratecheck(&ng_btsocket_hci_raw_lasttime, &ng_btsocket_hci_raw_curpps, 1)) \ printf /**************************************************************************** **************************************************************************** ** Netgraph specific **************************************************************************** ****************************************************************************/ /* * Netgraph node constructor. Do not allow to create node of this type. */ static int ng_btsocket_hci_raw_node_constructor(node_p node) { return (EINVAL); } /* ng_btsocket_hci_raw_node_constructor */ /* * Netgraph node destructor. Just let old node go and create new fresh one. */ static int ng_btsocket_hci_raw_node_shutdown(node_p node) { int error = 0; NG_NODE_UNREF(node); error = ng_make_node_common(&typestruct, &ng_btsocket_hci_raw_node); if (error != 0) { NG_BTSOCKET_HCI_RAW_ALERT( "%s: Could not create Netgraph node, error=%d\n", __func__, error); ng_btsocket_hci_raw_node = NULL; return (ENOMEM); } error = ng_name_node(ng_btsocket_hci_raw_node, NG_BTSOCKET_HCI_RAW_NODE_TYPE); if (error != 0) { NG_BTSOCKET_HCI_RAW_ALERT( "%s: Could not name Netgraph node, error=%d\n", __func__, error); NG_NODE_UNREF(ng_btsocket_hci_raw_node); ng_btsocket_hci_raw_node = NULL; return (EINVAL); } return (0); } /* ng_btsocket_hci_raw_node_shutdown */ /* * Create new hook. Just say "yes" */ static int ng_btsocket_hci_raw_node_newhook(node_p node, hook_p hook, char const *name) { return (0); } /* ng_btsocket_hci_raw_node_newhook */ /* * Connect hook. Just say "yes" */ static int ng_btsocket_hci_raw_node_connect(hook_p hook) { return (0); } /* ng_btsocket_hci_raw_node_connect */ /* * Disconnect hook */ static int ng_btsocket_hci_raw_node_disconnect(hook_p hook) { return (0); } /* ng_btsocket_hci_raw_node_disconnect */ /* * Receive control message. * Make sure it is a message from HCI node and it is a response. * Enqueue item and schedule input task. 
*/ static int ng_btsocket_hci_raw_node_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct ng_mesg *msg = NGI_MSG(item); /* item still has message */ int error = 0; /* * Check for empty sockets list creates LOR when both sender and * receiver device are connected to the same host, so remove it * for now */ if (msg != NULL && (msg->header.typecookie == NGM_HCI_COOKIE || msg->header.typecookie == NGM_GENERIC_COOKIE) && msg->header.flags & NGF_RESP) { if (msg->header.token == 0) { NG_FREE_ITEM(item); return (0); } mtx_lock(&ng_btsocket_hci_raw_queue_mtx); if (NG_BT_ITEMQ_FULL(&ng_btsocket_hci_raw_queue)) { NG_BTSOCKET_HCI_RAW_ERR( "%s: Input queue is full\n", __func__); NG_BT_ITEMQ_DROP(&ng_btsocket_hci_raw_queue); NG_FREE_ITEM(item); error = ENOBUFS; } else { NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_hci_raw_queue, item); error = ng_btsocket_hci_raw_wakeup_input_task(); } mtx_unlock(&ng_btsocket_hci_raw_queue_mtx); } else { NG_FREE_ITEM(item); error = EINVAL; } return (error); } /* ng_btsocket_hci_raw_node_rcvmsg */ /* * Receive packet from the one of our hook. * Prepend every packet with sockaddr_hci and record sender's node name. * Enqueue item and schedule input task. */ static int ng_btsocket_hci_raw_node_rcvdata(hook_p hook, item_p item) { struct mbuf *nam = NULL; int error; /* * Check for empty sockets list creates LOR when both sender and * receiver device are connected to the same host, so remove it * for now */ MGET(nam, M_NOWAIT, MT_SONAME); if (nam != NULL) { struct sockaddr_hci *sa = mtod(nam, struct sockaddr_hci *); nam->m_len = sizeof(struct sockaddr_hci); sa->hci_len = sizeof(*sa); sa->hci_family = AF_BLUETOOTH; strlcpy(sa->hci_node, NG_PEER_NODE_NAME(hook), sizeof(sa->hci_node)); NGI_GET_M(item, nam->m_next); NGI_M(item) = nam; mtx_lock(&ng_btsocket_hci_raw_queue_mtx); if (NG_BT_ITEMQ_FULL(&ng_btsocket_hci_raw_queue)) { NG_BTSOCKET_HCI_RAW_ERR( "%s: Input queue is full\n", __func__); NG_BT_ITEMQ_DROP(&ng_btsocket_hci_raw_queue); NG_FREE_ITEM(item); error = ENOBUFS; } else { NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_hci_raw_queue, item); error = ng_btsocket_hci_raw_wakeup_input_task(); } mtx_unlock(&ng_btsocket_hci_raw_queue_mtx); } else { NG_BTSOCKET_HCI_RAW_ERR( "%s: Failed to allocate address mbuf\n", __func__); NG_FREE_ITEM(item); error = ENOBUFS; } return (error); } /* ng_btsocket_hci_raw_node_rcvdata */ /**************************************************************************** **************************************************************************** ** Sockets specific **************************************************************************** ****************************************************************************/ /* * Get next token. We need token to avoid theoretical race where process * submits ioctl() message then interrupts ioctl() and re-submits another * ioctl() on the same socket *before* first ioctl() complete. 
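 *
 * In outline, the synchronous ioctl() path (ng_btsocket_hci_raw_send_sync_ngmsg()
 * and the SIOC_* cases in ng_btsocket_hci_raw_control() below) does:
 *
 *	ng_btsocket_hci_raw_get_token(&msg->header.token);
 *	pcb->token = msg->header.token;
 *	pcb->msg = NULL;
 *	NG_SEND_MSG_PATH(error, ng_btsocket_hci_raw_node, msg, path, 0);
 *	error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "hcictl",
 *	    ng_btsocket_hci_raw_ioctl_timeout * hz);
 *
 * and ng_btsocket_hci_raw_msg_input() only wakes the PCB whose stored token
 * matches the reply, so a reply to an interrupted request cannot be handed
 * to a newer one.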
*/ static void ng_btsocket_hci_raw_get_token(u_int32_t *token) { mtx_lock(&ng_btsocket_hci_raw_token_mtx); if (++ ng_btsocket_hci_raw_token == 0) ng_btsocket_hci_raw_token = 1; *token = ng_btsocket_hci_raw_token; mtx_unlock(&ng_btsocket_hci_raw_token_mtx); } /* ng_btsocket_hci_raw_get_token */ /* * Send Netgraph message to the node - do not expect reply */ static int ng_btsocket_hci_raw_send_ngmsg(char *path, int cmd, void *arg, int arglen) { struct ng_mesg *msg = NULL; int error = 0; NG_MKMESSAGE(msg, NGM_HCI_COOKIE, cmd, arglen, M_NOWAIT); if (msg == NULL) return (ENOMEM); if (arg != NULL && arglen > 0) bcopy(arg, msg->data, arglen); NG_SEND_MSG_PATH(error, ng_btsocket_hci_raw_node, msg, path, 0); return (error); } /* ng_btsocket_hci_raw_send_ngmsg */ /* * Send Netgraph message to the node (no data) and wait for reply */ static int ng_btsocket_hci_raw_send_sync_ngmsg(ng_btsocket_hci_raw_pcb_p pcb, char *path, int cmd, void *rsp, int rsplen) { struct ng_mesg *msg = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); NG_MKMESSAGE(msg, NGM_HCI_COOKIE, cmd, 0, M_NOWAIT); if (msg == NULL) return (ENOMEM); ng_btsocket_hci_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; NG_SEND_MSG_PATH(error, ng_btsocket_hci_raw_node, msg, path, 0); if (error != 0) { pcb->token = 0; return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "hcictl", ng_btsocket_hci_raw_ioctl_timeout * hz); pcb->token = 0; if (error != 0) return (error); if (pcb->msg != NULL && pcb->msg->header.cmd == cmd) bcopy(pcb->msg->data, rsp, rsplen); else error = EINVAL; NG_FREE_MSG(pcb->msg); /* checks for != NULL */ return (0); } /* ng_btsocket_hci_raw_send_sync_ngmsg */ /* * Create control information for the packet */ static void ng_btsocket_hci_raw_savctl(ng_btsocket_hci_raw_pcb_p pcb, struct mbuf **ctl, struct mbuf *m) { int dir; struct timeval tv; mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->flags & NG_BTSOCKET_HCI_RAW_DIRECTION) { dir = (m->m_flags & M_PROTO1)? 1 : 0; *ctl = sbcreatecontrol(&dir, sizeof(dir), SCM_HCI_RAW_DIRECTION, SOL_HCI_RAW, M_NOWAIT); if (*ctl != NULL) ctl = &((*ctl)->m_next); } if (pcb->so->so_options & SO_TIMESTAMP) { microtime(&tv); *ctl = sbcreatecontrol(&tv, sizeof(tv), SCM_TIMESTAMP, SOL_SOCKET, M_NOWAIT); if (*ctl != NULL) ctl = &((*ctl)->m_next); } } /* ng_btsocket_hci_raw_savctl */ /* * Raw HCI sockets data input routine */ static void ng_btsocket_hci_raw_data_input(struct mbuf *nam) { ng_btsocket_hci_raw_pcb_p pcb = NULL; struct mbuf *m0 = NULL, *m = NULL; struct sockaddr_hci *sa = NULL; m0 = nam->m_next; nam->m_next = NULL; KASSERT((nam->m_type == MT_SONAME), ("%s: m_type=%d\n", __func__, nam->m_type)); KASSERT((m0->m_flags & M_PKTHDR), ("%s: m_flags=%#x\n", __func__, m0->m_flags)); sa = mtod(nam, struct sockaddr_hci *); mtx_lock(&ng_btsocket_hci_raw_sockets_mtx); LIST_FOREACH(pcb, &ng_btsocket_hci_raw_sockets, next) { mtx_lock(&pcb->pcb_mtx); /* * If socket was bound then check address and * make sure it matches. */ if (pcb->addr.hci_node[0] != 0 && strcmp(sa->hci_node, pcb->addr.hci_node) != 0) goto next; /* * Check packet against filters * XXX do we have to call m_pullup() here? */ if (ng_btsocket_hci_raw_filter(pcb, m0, 1) != 0) goto next; /* * Make a copy of the packet, append to the socket's * receive queue and wakeup socket. sbappendaddr() * will check if socket has enough buffer space. 
*/ m = m_dup(m0, M_NOWAIT); if (m != NULL) { struct mbuf *ctl = NULL; ng_btsocket_hci_raw_savctl(pcb, &ctl, m); if (sbappendaddr(&pcb->so->so_rcv, (struct sockaddr *) sa, m, ctl)) sorwakeup(pcb->so); else { NG_BTSOCKET_HCI_RAW_INFO( "%s: sbappendaddr() failed\n", __func__); NG_FREE_M(m); NG_FREE_M(ctl); soroverflow(pcb->so); } } next: mtx_unlock(&pcb->pcb_mtx); } mtx_unlock(&ng_btsocket_hci_raw_sockets_mtx); NG_FREE_M(nam); NG_FREE_M(m0); } /* ng_btsocket_hci_raw_data_input */ /* * Raw HCI sockets message input routine */ static void ng_btsocket_hci_raw_msg_input(struct ng_mesg *msg) { ng_btsocket_hci_raw_pcb_p pcb = NULL; mtx_lock(&ng_btsocket_hci_raw_sockets_mtx); LIST_FOREACH(pcb, &ng_btsocket_hci_raw_sockets, next) { mtx_lock(&pcb->pcb_mtx); if (msg->header.token == pcb->token) { pcb->msg = msg; wakeup(&pcb->msg); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_hci_raw_sockets_mtx); return; } mtx_unlock(&pcb->pcb_mtx); } mtx_unlock(&ng_btsocket_hci_raw_sockets_mtx); NG_FREE_MSG(msg); /* checks for != NULL */ } /* ng_btsocket_hci_raw_msg_input */ /* * Raw HCI sockets input routines */ static void ng_btsocket_hci_raw_input(void *context, int pending) { item_p item = NULL; for (;;) { mtx_lock(&ng_btsocket_hci_raw_queue_mtx); NG_BT_ITEMQ_DEQUEUE(&ng_btsocket_hci_raw_queue, item); mtx_unlock(&ng_btsocket_hci_raw_queue_mtx); if (item == NULL) break; switch(item->el_flags & NGQF_TYPE) { case NGQF_DATA: { struct mbuf *m = NULL; NGI_GET_M(item, m); ng_btsocket_hci_raw_data_input(m); } break; case NGQF_MESG: { struct ng_mesg *msg = NULL; NGI_GET_MSG(item, msg); ng_btsocket_hci_raw_msg_input(msg); } break; default: KASSERT(0, ("%s: invalid item type=%ld\n", __func__, (item->el_flags & NGQF_TYPE))); break; } NG_FREE_ITEM(item); } } /* ng_btsocket_hci_raw_input */ /* * Raw HCI sockets output routine */ static void ng_btsocket_hci_raw_output(node_p node, hook_p hook, void *arg1, int arg2) { struct mbuf *nam = (struct mbuf *) arg1, *m = NULL; struct sockaddr_hci *sa = NULL; int error; m = nam->m_next; nam->m_next = NULL; KASSERT((nam->m_type == MT_SONAME), ("%s: m_type=%d\n", __func__, nam->m_type)); KASSERT((m->m_flags & M_PKTHDR), ("%s: m_flags=%#x\n", __func__, m->m_flags)); sa = mtod(nam, struct sockaddr_hci *); /* * Find downstream hook * XXX For now access node hook list directly. Should be safe because * we used ng_send_fn() and we should have exclusive lock on the node. */ LIST_FOREACH(hook, &node->nd_hooks, hk_hooks) { if (hook == NULL || NG_HOOK_NOT_VALID(hook) || NG_NODE_NOT_VALID(NG_PEER_NODE(hook))) continue; if (strcmp(sa->hci_node, NG_PEER_NODE_NAME(hook)) == 0) { NG_SEND_DATA_ONLY(error, hook, m); /* sets m to NULL */ break; } } NG_FREE_M(nam); /* check for != NULL */ NG_FREE_M(m); } /* ng_btsocket_hci_raw_output */ /* * Check frame against security and socket filters. * d (direction bit) == 1 means incoming frame. 
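 * The receive path above calls this as ng_btsocket_hci_raw_filter(pcb, m0, 1)
 * before appending a copy of the frame to a socket's receive queue; a
 * non-zero return skips delivery to that socket only.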
*/ static int ng_btsocket_hci_raw_filter(ng_btsocket_hci_raw_pcb_p pcb, struct mbuf *m, int d) { int type, event, opcode; mtx_assert(&pcb->pcb_mtx, MA_OWNED); switch ((type = *mtod(m, u_int8_t *))) { case NG_HCI_CMD_PKT: if (!(pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED)) { opcode = le16toh(mtod(m, ng_hci_cmd_pkt_t *)->opcode); if (!bit_test( ng_btsocket_hci_raw_sec_filter->commands[NG_HCI_OGF(opcode) - 1], NG_HCI_OCF(opcode) - 1)) return (EPERM); } if (d && !bit_test(pcb->filter.packet_mask, NG_HCI_CMD_PKT - 1)) return (EPERM); break; case NG_HCI_ACL_DATA_PKT: case NG_HCI_SCO_DATA_PKT: if (!(pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED) || !bit_test(pcb->filter.packet_mask, type - 1) || !d) return (EPERM); break; case NG_HCI_EVENT_PKT: if (!d) return (EINVAL); event = mtod(m, ng_hci_event_pkt_t *)->event - 1; if (!(pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED)) if (!bit_test(ng_btsocket_hci_raw_sec_filter->events, event)) return (EPERM); if (!bit_test(pcb->filter.event_mask, event)) return (EPERM); break; default: return (EINVAL); } return (0); } /* ng_btsocket_hci_raw_filter */ /* * Initialize everything */ static void ng_btsocket_hci_raw_init(void *arg __unused) { bitstr_t *f = NULL; int error = 0; ng_btsocket_hci_raw_node = NULL; ng_btsocket_hci_raw_debug_level = NG_BTSOCKET_WARN_LEVEL; ng_btsocket_hci_raw_ioctl_timeout = 5; /* Register Netgraph node type */ error = ng_newtype(&typestruct); if (error != 0) { NG_BTSOCKET_HCI_RAW_ALERT( "%s: Could not register Netgraph node type, error=%d\n", __func__, error); return; } /* Create Netgrapg node */ error = ng_make_node_common(&typestruct, &ng_btsocket_hci_raw_node); if (error != 0) { NG_BTSOCKET_HCI_RAW_ALERT( "%s: Could not create Netgraph node, error=%d\n", __func__, error); ng_btsocket_hci_raw_node = NULL; return; } error = ng_name_node(ng_btsocket_hci_raw_node, NG_BTSOCKET_HCI_RAW_NODE_TYPE); if (error != 0) { NG_BTSOCKET_HCI_RAW_ALERT( "%s: Could not name Netgraph node, error=%d\n", __func__, error); NG_NODE_UNREF(ng_btsocket_hci_raw_node); ng_btsocket_hci_raw_node = NULL; return; } /* Create input queue */ NG_BT_ITEMQ_INIT(&ng_btsocket_hci_raw_queue, 300); mtx_init(&ng_btsocket_hci_raw_queue_mtx, "btsocks_hci_raw_queue_mtx", NULL, MTX_DEF); TASK_INIT(&ng_btsocket_hci_raw_task, 0, ng_btsocket_hci_raw_input, NULL); /* Create list of sockets */ LIST_INIT(&ng_btsocket_hci_raw_sockets); mtx_init(&ng_btsocket_hci_raw_sockets_mtx, "btsocks_hci_raw_sockets_mtx", NULL, MTX_DEF); /* Tokens */ ng_btsocket_hci_raw_token = 0; mtx_init(&ng_btsocket_hci_raw_token_mtx, "btsocks_hci_raw_token_mtx", NULL, MTX_DEF); /* * Security filter * XXX never free()ed */ ng_btsocket_hci_raw_sec_filter = malloc(sizeof(struct ng_btsocket_hci_raw_sec_filter), M_NETGRAPH_BTSOCKET_HCI_RAW, M_NOWAIT|M_ZERO); if (ng_btsocket_hci_raw_sec_filter == NULL) { printf("%s: Could not allocate security filter!\n", __func__); return; } /* * XXX How paranoid can we get? * * Initialize security filter. If bit is set in the mask then * unprivileged socket is allowed to send (receive) this command * (event). 
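 * For example, NG_HCI_OCF_READ_BDADDR is set in the Informational group
 * below, so an unprivileged socket may issue Read_BDADDR; the matching check
 * in ng_btsocket_hci_raw_filter() is roughly
 *	bit_test(sec_filter->commands[NG_HCI_OGF(opcode) - 1],
 *	    NG_HCI_OCF(opcode) - 1)
 * while commands whose bit is left clear fail that test with EPERM.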
*/ /* Enable all events */ memset(&ng_btsocket_hci_raw_sec_filter->events, 0xff, sizeof(ng_btsocket_hci_raw_sec_filter->events)/ sizeof(ng_btsocket_hci_raw_sec_filter->events[0])); /* Disable some critical events */ f = ng_btsocket_hci_raw_sec_filter->events; bit_clear(f, NG_HCI_EVENT_RETURN_LINK_KEYS - 1); bit_clear(f, NG_HCI_EVENT_LINK_KEY_NOTIFICATION - 1); bit_clear(f, NG_HCI_EVENT_VENDOR - 1); /* Commands - Link control */ f = ng_btsocket_hci_raw_sec_filter->commands[NG_HCI_OGF_LINK_CONTROL-1]; bit_set(f, NG_HCI_OCF_INQUIRY - 1); bit_set(f, NG_HCI_OCF_INQUIRY_CANCEL - 1); bit_set(f, NG_HCI_OCF_PERIODIC_INQUIRY - 1); bit_set(f, NG_HCI_OCF_EXIT_PERIODIC_INQUIRY - 1); bit_set(f, NG_HCI_OCF_REMOTE_NAME_REQ - 1); bit_set(f, NG_HCI_OCF_READ_REMOTE_FEATURES - 1); bit_set(f, NG_HCI_OCF_READ_REMOTE_VER_INFO - 1); bit_set(f, NG_HCI_OCF_READ_CLOCK_OFFSET - 1); /* Commands - Link policy */ f = ng_btsocket_hci_raw_sec_filter->commands[NG_HCI_OGF_LINK_POLICY-1]; bit_set(f, NG_HCI_OCF_ROLE_DISCOVERY - 1); bit_set(f, NG_HCI_OCF_READ_LINK_POLICY_SETTINGS - 1); /* Commands - Host controller and baseband */ f = ng_btsocket_hci_raw_sec_filter->commands[NG_HCI_OGF_HC_BASEBAND-1]; bit_set(f, NG_HCI_OCF_READ_PIN_TYPE - 1); bit_set(f, NG_HCI_OCF_READ_LOCAL_NAME - 1); bit_set(f, NG_HCI_OCF_READ_CON_ACCEPT_TIMO - 1); bit_set(f, NG_HCI_OCF_READ_PAGE_TIMO - 1); bit_set(f, NG_HCI_OCF_READ_SCAN_ENABLE - 1); bit_set(f, NG_HCI_OCF_READ_PAGE_SCAN_ACTIVITY - 1); bit_set(f, NG_HCI_OCF_READ_INQUIRY_SCAN_ACTIVITY - 1); bit_set(f, NG_HCI_OCF_READ_AUTH_ENABLE - 1); bit_set(f, NG_HCI_OCF_READ_ENCRYPTION_MODE - 1); bit_set(f, NG_HCI_OCF_READ_UNIT_CLASS - 1); bit_set(f, NG_HCI_OCF_READ_VOICE_SETTINGS - 1); bit_set(f, NG_HCI_OCF_READ_AUTO_FLUSH_TIMO - 1); bit_set(f, NG_HCI_OCF_READ_NUM_BROADCAST_RETRANS - 1); bit_set(f, NG_HCI_OCF_READ_HOLD_MODE_ACTIVITY - 1); bit_set(f, NG_HCI_OCF_READ_XMIT_LEVEL - 1); bit_set(f, NG_HCI_OCF_READ_SCO_FLOW_CONTROL - 1); bit_set(f, NG_HCI_OCF_READ_LINK_SUPERVISION_TIMO - 1); bit_set(f, NG_HCI_OCF_READ_SUPPORTED_IAC_NUM - 1); bit_set(f, NG_HCI_OCF_READ_IAC_LAP - 1); bit_set(f, NG_HCI_OCF_READ_PAGE_SCAN_PERIOD - 1); bit_set(f, NG_HCI_OCF_READ_PAGE_SCAN - 1); bit_set(f, NG_HCI_OCF_READ_LE_HOST_SUPPORTED -1); /* Commands - Informational */ f = ng_btsocket_hci_raw_sec_filter->commands[NG_HCI_OGF_INFO - 1]; bit_set(f, NG_HCI_OCF_READ_LOCAL_VER - 1); bit_set(f, NG_HCI_OCF_READ_LOCAL_FEATURES - 1); bit_set(f, NG_HCI_OCF_READ_BUFFER_SIZE - 1); bit_set(f, NG_HCI_OCF_READ_COUNTRY_CODE - 1); bit_set(f, NG_HCI_OCF_READ_BDADDR - 1); /* Commands - Status */ f = ng_btsocket_hci_raw_sec_filter->commands[NG_HCI_OGF_STATUS - 1]; bit_set(f, NG_HCI_OCF_READ_FAILED_CONTACT_CNTR - 1); bit_set(f, NG_HCI_OCF_GET_LINK_QUALITY - 1); bit_set(f, NG_HCI_OCF_READ_RSSI - 1); /* Commands - Testing */ f = ng_btsocket_hci_raw_sec_filter->commands[NG_HCI_OGF_TESTING - 1]; bit_set(f, NG_HCI_OCF_READ_LOOPBACK_MODE - 1); /*Commands - LE*/ f = ng_btsocket_hci_raw_sec_filter->commands[NG_HCI_OGF_LE -1]; bit_set(f, NG_HCI_OCF_LE_SET_SCAN_ENABLE - 1); bit_set(f, NG_HCI_OCF_LE_SET_SCAN_PARAMETERS - 1); bit_set(f, NG_HCI_OCF_LE_READ_LOCAL_SUPPORTED_FEATURES - 1); bit_set(f, NG_HCI_OCF_LE_READ_BUFFER_SIZE - 1); bit_set(f, NG_HCI_OCF_LE_READ_WHITE_LIST_SIZE - 1); } /* ng_btsocket_hci_raw_init */ SYSINIT(ng_btsocket_hci_raw_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ng_btsocket_hci_raw_init, NULL); /* * Abort connection on socket */ void ng_btsocket_hci_raw_abort(struct socket *so) { } /* ng_btsocket_hci_raw_abort */ void 
ng_btsocket_hci_raw_close(struct socket *so) { } /* ng_btsocket_hci_raw_close */ /* * Create new raw HCI socket */ int ng_btsocket_hci_raw_attach(struct socket *so, int proto, struct thread *td) { ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); int error = 0; if (pcb != NULL) return (EISCONN); if (ng_btsocket_hci_raw_node == NULL) return (EPROTONOSUPPORT); if (proto != BLUETOOTH_PROTO_HCI) return (EPROTONOSUPPORT); if (so->so_type != SOCK_RAW) return (ESOCKTNOSUPPORT); error = soreserve(so, NG_BTSOCKET_HCI_RAW_SENDSPACE, NG_BTSOCKET_HCI_RAW_RECVSPACE); if (error != 0) return (error); pcb = malloc(sizeof(*pcb), M_NETGRAPH_BTSOCKET_HCI_RAW, M_NOWAIT|M_ZERO); if (pcb == NULL) return (ENOMEM); so->so_pcb = (caddr_t) pcb; pcb->so = so; if (priv_check(td, PRIV_NETBLUETOOTH_RAW) == 0) pcb->flags |= NG_BTSOCKET_HCI_RAW_PRIVILEGED; /* * Set default socket filter. By default socket only accepts HCI * Command_Complete and Command_Status event packets. */ bit_set(pcb->filter.event_mask, NG_HCI_EVENT_COMMAND_COMPL - 1); bit_set(pcb->filter.event_mask, NG_HCI_EVENT_COMMAND_STATUS - 1); mtx_init(&pcb->pcb_mtx, "btsocks_hci_raw_pcb_mtx", NULL, MTX_DEF); mtx_lock(&ng_btsocket_hci_raw_sockets_mtx); LIST_INSERT_HEAD(&ng_btsocket_hci_raw_sockets, pcb, next); mtx_unlock(&ng_btsocket_hci_raw_sockets_mtx); return (0); } /* ng_btsocket_hci_raw_attach */ /* * Bind raw HCI socket */ int ng_btsocket_hci_raw_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); struct sockaddr_hci *sa = (struct sockaddr_hci *) nam; if (pcb == NULL) return (EINVAL); if (ng_btsocket_hci_raw_node == NULL) return (EINVAL); if (sa == NULL) return (EINVAL); if (sa->hci_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if (sa->hci_len != sizeof(*sa)) return (EINVAL); if (sa->hci_node[0] == 0) return (EINVAL); mtx_lock(&pcb->pcb_mtx); bcopy(sa, &pcb->addr, sizeof(pcb->addr)); mtx_unlock(&pcb->pcb_mtx); return (0); } /* ng_btsocket_hci_raw_bind */ /* * Connect raw HCI socket */ int ng_btsocket_hci_raw_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); struct sockaddr_hci *sa = (struct sockaddr_hci *) nam; if (pcb == NULL) return (EINVAL); if (ng_btsocket_hci_raw_node == NULL) return (EINVAL); if (sa == NULL) return (EINVAL); if (sa->hci_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if (sa->hci_len != sizeof(*sa)) return (EINVAL); if (sa->hci_node[0] == 0) return (EDESTADDRREQ); mtx_lock(&pcb->pcb_mtx); if (bcmp(sa, &pcb->addr, sizeof(pcb->addr)) != 0) { mtx_unlock(&pcb->pcb_mtx); return (EADDRNOTAVAIL); } soisconnected(so); mtx_unlock(&pcb->pcb_mtx); return (0); } /* ng_btsocket_hci_raw_connect */ /* * Process ioctl on socket */ int ng_btsocket_hci_raw_control(struct socket *so, u_long cmd, void *data, struct ifnet *ifp, struct thread *td) { ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); char path[NG_NODESIZ + 1]; struct ng_mesg *msg = NULL; int error = 0; if (pcb == NULL) return (EINVAL); if (ng_btsocket_hci_raw_node == NULL) return (EINVAL); mtx_lock(&pcb->pcb_mtx); /* Check if we have device name */ if (pcb->addr.hci_node[0] == 0) { mtx_unlock(&pcb->pcb_mtx); return (EHOSTUNREACH); } /* Check if we have pending ioctl() */ if (pcb->token != 0) { mtx_unlock(&pcb->pcb_mtx); return (EBUSY); } snprintf(path, sizeof(path), "%s:", pcb->addr.hci_node); switch (cmd) { case SIOC_HCI_RAW_NODE_GET_STATE: { struct ng_btsocket_hci_raw_node_state *p = (struct ng_btsocket_hci_raw_node_state *) data; error = 
ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_STATE, &p->state, sizeof(p->state)); } break; case SIOC_HCI_RAW_NODE_INIT: if (pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED) error = ng_btsocket_hci_raw_send_ngmsg(path, NGM_HCI_NODE_INIT, NULL, 0); else error = EPERM; break; case SIOC_HCI_RAW_NODE_GET_DEBUG: { struct ng_btsocket_hci_raw_node_debug *p = (struct ng_btsocket_hci_raw_node_debug *) data; error = ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_DEBUG, &p->debug, sizeof(p->debug)); } break; case SIOC_HCI_RAW_NODE_SET_DEBUG: { struct ng_btsocket_hci_raw_node_debug *p = (struct ng_btsocket_hci_raw_node_debug *) data; if (pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED) error = ng_btsocket_hci_raw_send_ngmsg(path, NGM_HCI_NODE_SET_DEBUG, &p->debug, sizeof(p->debug)); else error = EPERM; } break; case SIOC_HCI_RAW_NODE_GET_BUFFER: { struct ng_btsocket_hci_raw_node_buffer *p = (struct ng_btsocket_hci_raw_node_buffer *) data; error = ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_BUFFER, &p->buffer, sizeof(p->buffer)); } break; case SIOC_HCI_RAW_NODE_GET_BDADDR: { struct ng_btsocket_hci_raw_node_bdaddr *p = (struct ng_btsocket_hci_raw_node_bdaddr *) data; error = ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_BDADDR, &p->bdaddr, sizeof(p->bdaddr)); } break; case SIOC_HCI_RAW_NODE_GET_FEATURES: { struct ng_btsocket_hci_raw_node_features *p = (struct ng_btsocket_hci_raw_node_features *) data; error = ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_FEATURES, &p->features, sizeof(p->features)); } break; case SIOC_HCI_RAW_NODE_GET_STAT: { struct ng_btsocket_hci_raw_node_stat *p = (struct ng_btsocket_hci_raw_node_stat *) data; error = ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_STAT, &p->stat, sizeof(p->stat)); } break; case SIOC_HCI_RAW_NODE_RESET_STAT: if (pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED) error = ng_btsocket_hci_raw_send_ngmsg(path, NGM_HCI_NODE_RESET_STAT, NULL, 0); else error = EPERM; break; case SIOC_HCI_RAW_NODE_FLUSH_NEIGHBOR_CACHE: if (pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED) error = ng_btsocket_hci_raw_send_ngmsg(path, NGM_HCI_NODE_FLUSH_NEIGHBOR_CACHE, NULL, 0); else error = EPERM; break; case SIOC_HCI_RAW_NODE_GET_NEIGHBOR_CACHE: { struct ng_btsocket_hci_raw_node_neighbor_cache *p = (struct ng_btsocket_hci_raw_node_neighbor_cache *) data; ng_hci_node_get_neighbor_cache_ep *p1 = NULL; ng_hci_node_neighbor_cache_entry_ep *p2 = NULL; if (p->num_entries <= 0 || p->num_entries > NG_HCI_MAX_NEIGHBOR_NUM || p->entries == NULL) { mtx_unlock(&pcb->pcb_mtx); return (EINVAL); } NG_MKMESSAGE(msg, NGM_HCI_COOKIE, NGM_HCI_NODE_GET_NEIGHBOR_CACHE, 0, M_NOWAIT); if (msg == NULL) { mtx_unlock(&pcb->pcb_mtx); return (ENOMEM); } ng_btsocket_hci_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; NG_SEND_MSG_PATH(error, ng_btsocket_hci_raw_node, msg, path, 0); if (error != 0) { pcb->token = 0; mtx_unlock(&pcb->pcb_mtx); return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "hcictl", ng_btsocket_hci_raw_ioctl_timeout * hz); pcb->token = 0; if (error != 0) { mtx_unlock(&pcb->pcb_mtx); return (error); } msg = pcb->msg; pcb->msg = NULL; mtx_unlock(&pcb->pcb_mtx); if (msg != NULL && msg->header.cmd == NGM_HCI_NODE_GET_NEIGHBOR_CACHE) { /* Return data back to user space */ p1 = (ng_hci_node_get_neighbor_cache_ep *)(msg->data); p2 = (ng_hci_node_neighbor_cache_entry_ep *)(p1 + 1); p->num_entries = min(p->num_entries, p1->num_entries); if 
(p->num_entries > 0) error = copyout((caddr_t) p2, (caddr_t) p->entries, p->num_entries * sizeof(*p2)); } else error = EINVAL; NG_FREE_MSG(msg); /* checks for != NULL */ return (error); } /* NOTREACHED */ case SIOC_HCI_RAW_NODE_GET_CON_LIST: { struct ng_btsocket_hci_raw_con_list *p = (struct ng_btsocket_hci_raw_con_list *) data; ng_hci_node_con_list_ep *p1 = NULL; ng_hci_node_con_ep *p2 = NULL; if (p->num_connections == 0 || p->num_connections > NG_HCI_MAX_CON_NUM || p->connections == NULL) { mtx_unlock(&pcb->pcb_mtx); return (EINVAL); } NG_MKMESSAGE(msg, NGM_HCI_COOKIE, NGM_HCI_NODE_GET_CON_LIST, 0, M_NOWAIT); if (msg == NULL) { mtx_unlock(&pcb->pcb_mtx); return (ENOMEM); } ng_btsocket_hci_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; NG_SEND_MSG_PATH(error, ng_btsocket_hci_raw_node, msg, path, 0); if (error != 0) { pcb->token = 0; mtx_unlock(&pcb->pcb_mtx); return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "hcictl", ng_btsocket_hci_raw_ioctl_timeout * hz); pcb->token = 0; if (error != 0) { mtx_unlock(&pcb->pcb_mtx); return (error); } msg = pcb->msg; pcb->msg = NULL; mtx_unlock(&pcb->pcb_mtx); if (msg != NULL && msg->header.cmd == NGM_HCI_NODE_GET_CON_LIST) { /* Return data back to user space */ p1 = (ng_hci_node_con_list_ep *)(msg->data); p2 = (ng_hci_node_con_ep *)(p1 + 1); p->num_connections = min(p->num_connections, p1->num_connections); if (p->num_connections > 0) error = copyout((caddr_t) p2, (caddr_t) p->connections, p->num_connections * sizeof(*p2)); } else error = EINVAL; NG_FREE_MSG(msg); /* checks for != NULL */ return (error); } /* NOTREACHED */ case SIOC_HCI_RAW_NODE_GET_LINK_POLICY_MASK: { struct ng_btsocket_hci_raw_node_link_policy_mask *p = (struct ng_btsocket_hci_raw_node_link_policy_mask *) data; error = ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_LINK_POLICY_SETTINGS_MASK, &p->policy_mask, sizeof(p->policy_mask)); } break; case SIOC_HCI_RAW_NODE_SET_LINK_POLICY_MASK: { struct ng_btsocket_hci_raw_node_link_policy_mask *p = (struct ng_btsocket_hci_raw_node_link_policy_mask *) data; if (pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED) error = ng_btsocket_hci_raw_send_ngmsg(path, NGM_HCI_NODE_SET_LINK_POLICY_SETTINGS_MASK, &p->policy_mask, sizeof(p->policy_mask)); else error = EPERM; } break; case SIOC_HCI_RAW_NODE_GET_PACKET_MASK: { struct ng_btsocket_hci_raw_node_packet_mask *p = (struct ng_btsocket_hci_raw_node_packet_mask *) data; error = ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_PACKET_MASK, &p->packet_mask, sizeof(p->packet_mask)); } break; case SIOC_HCI_RAW_NODE_SET_PACKET_MASK: { struct ng_btsocket_hci_raw_node_packet_mask *p = (struct ng_btsocket_hci_raw_node_packet_mask *) data; if (pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED) error = ng_btsocket_hci_raw_send_ngmsg(path, NGM_HCI_NODE_SET_PACKET_MASK, &p->packet_mask, sizeof(p->packet_mask)); else error = EPERM; } break; case SIOC_HCI_RAW_NODE_GET_ROLE_SWITCH: { struct ng_btsocket_hci_raw_node_role_switch *p = (struct ng_btsocket_hci_raw_node_role_switch *) data; error = ng_btsocket_hci_raw_send_sync_ngmsg(pcb, path, NGM_HCI_NODE_GET_ROLE_SWITCH, &p->role_switch, sizeof(p->role_switch)); } break; case SIOC_HCI_RAW_NODE_SET_ROLE_SWITCH: { struct ng_btsocket_hci_raw_node_role_switch *p = (struct ng_btsocket_hci_raw_node_role_switch *) data; if (pcb->flags & NG_BTSOCKET_HCI_RAW_PRIVILEGED) error = ng_btsocket_hci_raw_send_ngmsg(path, NGM_HCI_NODE_SET_ROLE_SWITCH, &p->role_switch, sizeof(p->role_switch)); else error = 
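/*
 * Illustrative only: a userland caller of SIOC_HCI_RAW_NODE_GET_CON_LIST
 * (or of GET_NEIGHBOR_CACHE, which works the same way) pre-allocates the
 * result array and passes its capacity.  Field names are the ones
 * checked above; error handling is omitted:
 *
 *	struct ng_btsocket_hci_raw_con_list cl;
 *	ng_hci_node_con_ep con[NG_HCI_MAX_CON_NUM];
 *	memset(&cl, 0, sizeof(cl));
 *	cl.num_connections = NG_HCI_MAX_CON_NUM;
 *	cl.connections = con;
 *	if (ioctl(s, SIOC_HCI_RAW_NODE_GET_CON_LIST, &cl) == 0)
 *		then cl.num_connections entries in con[] are valid
 */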
EPERM; } break; case SIOC_HCI_RAW_NODE_LIST_NAMES: { struct ng_btsocket_hci_raw_node_list_names *nl = (struct ng_btsocket_hci_raw_node_list_names *) data; struct nodeinfo *ni = nl->names; if (nl->num_names == 0) { mtx_unlock(&pcb->pcb_mtx); return (EINVAL); } NG_MKMESSAGE(msg, NGM_GENERIC_COOKIE, NGM_LISTNAMES, 0, M_NOWAIT); if (msg == NULL) { mtx_unlock(&pcb->pcb_mtx); return (ENOMEM); } ng_btsocket_hci_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; NG_SEND_MSG_PATH(error, ng_btsocket_hci_raw_node, msg, ".:", 0); if (error != 0) { pcb->token = 0; mtx_unlock(&pcb->pcb_mtx); return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "hcictl", ng_btsocket_hci_raw_ioctl_timeout * hz); pcb->token = 0; if (error != 0) { mtx_unlock(&pcb->pcb_mtx); return (error); } msg = pcb->msg; pcb->msg = NULL; mtx_unlock(&pcb->pcb_mtx); if (msg != NULL && msg->header.cmd == NGM_LISTNAMES) { /* Return data back to user space */ struct namelist *nl1 = (struct namelist *) msg->data; struct nodeinfo *ni1 = &nl1->nodeinfo[0]; while (nl->num_names > 0 && nl1->numnames > 0) { if (strcmp(ni1->type, NG_HCI_NODE_TYPE) == 0) { error = copyout((caddr_t) ni1, (caddr_t) ni, sizeof(*ni)); if (error != 0) break; nl->num_names --; ni ++; } nl1->numnames --; ni1 ++; } nl->num_names = ni - nl->names; } else error = EINVAL; NG_FREE_MSG(msg); /* checks for != NULL */ return (error); } /* NOTREACHED */ default: error = EINVAL; break; } mtx_unlock(&pcb->pcb_mtx); return (error); } /* ng_btsocket_hci_raw_control */ /* * Process getsockopt/setsockopt system calls */ int ng_btsocket_hci_raw_ctloutput(struct socket *so, struct sockopt *sopt) { ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); struct ng_btsocket_hci_raw_filter filter; int error = 0, dir; if (pcb == NULL) return (EINVAL); if (ng_btsocket_hci_raw_node == NULL) return (EINVAL); if (sopt->sopt_level != SOL_HCI_RAW) return (0); mtx_lock(&pcb->pcb_mtx); switch (sopt->sopt_dir) { case SOPT_GET: switch (sopt->sopt_name) { case SO_HCI_RAW_FILTER: error = sooptcopyout(sopt, &pcb->filter, sizeof(pcb->filter)); break; case SO_HCI_RAW_DIRECTION: dir = (pcb->flags & NG_BTSOCKET_HCI_RAW_DIRECTION)?1:0; error = sooptcopyout(sopt, &dir, sizeof(dir)); break; default: error = EINVAL; break; } break; case SOPT_SET: switch (sopt->sopt_name) { case SO_HCI_RAW_FILTER: error = sooptcopyin(sopt, &filter, sizeof(filter), sizeof(filter)); if (error == 0) bcopy(&filter, &pcb->filter, sizeof(pcb->filter)); break; case SO_HCI_RAW_DIRECTION: error = sooptcopyin(sopt, &dir, sizeof(dir), sizeof(dir)); if (error != 0) break; if (dir) pcb->flags |= NG_BTSOCKET_HCI_RAW_DIRECTION; else pcb->flags &= ~NG_BTSOCKET_HCI_RAW_DIRECTION; break; default: error = EINVAL; break; } break; default: error = EINVAL; break; } mtx_unlock(&pcb->pcb_mtx); return (error); } /* ng_btsocket_hci_raw_ctloutput */ /* * Detach raw HCI socket */ void ng_btsocket_hci_raw_detach(struct socket *so) { ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); KASSERT(pcb != NULL, ("ng_btsocket_hci_raw_detach: pcb == NULL")); if (ng_btsocket_hci_raw_node == NULL) return; mtx_lock(&ng_btsocket_hci_raw_sockets_mtx); mtx_lock(&pcb->pcb_mtx); LIST_REMOVE(pcb, next); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_hci_raw_sockets_mtx); mtx_destroy(&pcb->pcb_mtx); bzero(pcb, sizeof(*pcb)); free(pcb, M_NETGRAPH_BTSOCKET_HCI_RAW); so->so_pcb = NULL; } /* ng_btsocket_hci_raw_detach */ /* * Disconnect raw HCI socket */ int ng_btsocket_hci_raw_disconnect(struct socket *so) { 
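/*
 * Illustrative only: the SOL_HCI_RAW options handled by ctloutput()
 * above can be driven from userland as sketched below, to widen the
 * default event filter (which, as set up in attach(), passes only
 * Command_Complete and Command_Status events) and to ask for direction
 * information.  This assumes the userland ng_btsocket_hci_raw_filter
 * carries the same event_mask bit string the kernel code uses, and
 * NG_HCI_EVENT_CON_COMPL is only an example of an extra event:
 *
 *	struct ng_btsocket_hci_raw_filter f;
 *	socklen_t len = sizeof(f);
 *	int dir = 1;
 *	getsockopt(s, SOL_HCI_RAW, SO_HCI_RAW_FILTER, &f, &len);
 *	bit_set(f.event_mask, NG_HCI_EVENT_CON_COMPL - 1);
 *	setsockopt(s, SOL_HCI_RAW, SO_HCI_RAW_FILTER, &f, sizeof(f));
 *	setsockopt(s, SOL_HCI_RAW, SO_HCI_RAW_DIRECTION, &dir, sizeof(dir));
 */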
ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); if (pcb == NULL) return (EINVAL); if (ng_btsocket_hci_raw_node == NULL) return (EINVAL); mtx_lock(&pcb->pcb_mtx); soisdisconnected(so); mtx_unlock(&pcb->pcb_mtx); return (0); } /* ng_btsocket_hci_raw_disconnect */ -/* - * Get socket peer's address - */ - -int -ng_btsocket_hci_raw_peeraddr(struct socket *so, struct sockaddr **nam) -{ - return (ng_btsocket_hci_raw_sockaddr(so, nam)); -} /* ng_btsocket_hci_raw_peeraddr */ - /* * Send data */ int ng_btsocket_hci_raw_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *sa, struct mbuf *control, struct thread *td) { ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); struct mbuf *nam = NULL; int error = 0; if (ng_btsocket_hci_raw_node == NULL) { error = ENETDOWN; goto drop; } if (pcb == NULL) { error = EINVAL; goto drop; } if (control != NULL) { error = EINVAL; goto drop; } if (m->m_pkthdr.len < sizeof(ng_hci_cmd_pkt_t) || m->m_pkthdr.len > sizeof(ng_hci_cmd_pkt_t) + NG_HCI_CMD_PKT_SIZE) { error = EMSGSIZE; goto drop; } if (m->m_len < sizeof(ng_hci_cmd_pkt_t)) { if ((m = m_pullup(m, sizeof(ng_hci_cmd_pkt_t))) == NULL) { error = ENOBUFS; goto drop; } } if (*mtod(m, u_int8_t *) != NG_HCI_CMD_PKT) { error = ENOTSUP; goto drop; } mtx_lock(&pcb->pcb_mtx); error = ng_btsocket_hci_raw_filter(pcb, m, 0); if (error != 0) { mtx_unlock(&pcb->pcb_mtx); goto drop; } if (sa == NULL) { if (pcb->addr.hci_node[0] == 0) { mtx_unlock(&pcb->pcb_mtx); error = EDESTADDRREQ; goto drop; } sa = (struct sockaddr *) &pcb->addr; } MGET(nam, M_NOWAIT, MT_SONAME); if (nam == NULL) { mtx_unlock(&pcb->pcb_mtx); error = ENOBUFS; goto drop; } nam->m_len = sizeof(struct sockaddr_hci); bcopy(sa,mtod(nam, struct sockaddr_hci *),sizeof(struct sockaddr_hci)); nam->m_next = m; m = NULL; mtx_unlock(&pcb->pcb_mtx); return (ng_send_fn(ng_btsocket_hci_raw_node, NULL, ng_btsocket_hci_raw_output, nam, 0)); drop: NG_FREE_M(control); /* NG_FREE_M checks for != NULL */ NG_FREE_M(nam); NG_FREE_M(m); return (error); } /* ng_btsocket_hci_raw_send */ /* * Get socket address */ int -ng_btsocket_hci_raw_sockaddr(struct socket *so, struct sockaddr **nam) +ng_btsocket_hci_raw_sockaddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_hci_raw_pcb_p pcb = so2hci_raw_pcb(so); - struct sockaddr_hci sa; + struct sockaddr_hci *hci = (struct sockaddr_hci *)sa; if (pcb == NULL) return (EINVAL); if (ng_btsocket_hci_raw_node == NULL) return (EINVAL); - bzero(&sa, sizeof(sa)); - sa.hci_len = sizeof(sa); - sa.hci_family = AF_BLUETOOTH; + *hci = (struct sockaddr_hci ){ + .hci_len = sizeof(struct sockaddr_hci), + .hci_family = AF_BLUETOOTH, + }; mtx_lock(&pcb->pcb_mtx); - strlcpy(sa.hci_node, pcb->addr.hci_node, sizeof(sa.hci_node)); + strlcpy(hci->hci_node, pcb->addr.hci_node, sizeof(hci->hci_node)); mtx_unlock(&pcb->pcb_mtx); - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); - - return ((*nam == NULL)? ENOMEM : 0); -} /* ng_btsocket_hci_raw_sockaddr */ + return (0); +} diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c b/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c index d221cc34d36a..ae5405529667 100644 --- a/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c +++ b/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap.c @@ -1,2976 +1,2949 @@ /* * ng_btsocket_l2cap.c */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_btsocket_l2cap.c,v 1.16 2003/09/14 23:29:06 max Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* MALLOC define */ #ifdef NG_SEPARATE_MALLOC static MALLOC_DEFINE(M_NETGRAPH_BTSOCKET_L2CAP, "netgraph_btsocks_l2cap", "Netgraph Bluetooth L2CAP sockets"); #else #define M_NETGRAPH_BTSOCKET_L2CAP M_NETGRAPH #endif /* NG_SEPARATE_MALLOC */ /* Netgraph node methods */ static ng_constructor_t ng_btsocket_l2cap_node_constructor; static ng_rcvmsg_t ng_btsocket_l2cap_node_rcvmsg; static ng_shutdown_t ng_btsocket_l2cap_node_shutdown; static ng_newhook_t ng_btsocket_l2cap_node_newhook; static ng_connect_t ng_btsocket_l2cap_node_connect; static ng_rcvdata_t ng_btsocket_l2cap_node_rcvdata; static ng_disconnect_t ng_btsocket_l2cap_node_disconnect; static void ng_btsocket_l2cap_input (void *, int); static void ng_btsocket_l2cap_rtclean (void *, int); /* Netgraph type descriptor */ static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_BTSOCKET_L2CAP_NODE_TYPE, .constructor = ng_btsocket_l2cap_node_constructor, .rcvmsg = ng_btsocket_l2cap_node_rcvmsg, .shutdown = ng_btsocket_l2cap_node_shutdown, .newhook = ng_btsocket_l2cap_node_newhook, .connect = ng_btsocket_l2cap_node_connect, .rcvdata = ng_btsocket_l2cap_node_rcvdata, .disconnect = ng_btsocket_l2cap_node_disconnect, }; /* Globals */ extern int ifqmaxlen; static u_int32_t ng_btsocket_l2cap_debug_level; static node_p ng_btsocket_l2cap_node; static struct ng_bt_itemq ng_btsocket_l2cap_queue; static struct mtx ng_btsocket_l2cap_queue_mtx; static struct task ng_btsocket_l2cap_queue_task; static LIST_HEAD(, ng_btsocket_l2cap_pcb) ng_btsocket_l2cap_sockets; static struct mtx ng_btsocket_l2cap_sockets_mtx; static LIST_HEAD(, ng_btsocket_l2cap_rtentry) ng_btsocket_l2cap_rt; static struct mtx ng_btsocket_l2cap_rt_mtx; static struct task ng_btsocket_l2cap_rt_task; static struct timeval ng_btsocket_l2cap_lasttime; static int ng_btsocket_l2cap_curpps; /* Sysctl tree */ SYSCTL_DECL(_net_bluetooth_l2cap_sockets); static SYSCTL_NODE(_net_bluetooth_l2cap_sockets, OID_AUTO, seq, CTLFLAG_RW | CTLFLAG_MPSAFE, 
0, "Bluetooth SEQPACKET L2CAP sockets family"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_seq, OID_AUTO, debug_level, CTLFLAG_RW, &ng_btsocket_l2cap_debug_level, NG_BTSOCKET_WARN_LEVEL, "Bluetooth SEQPACKET L2CAP sockets debug level"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_seq, OID_AUTO, queue_len, CTLFLAG_RD, &ng_btsocket_l2cap_queue.len, 0, "Bluetooth SEQPACKET L2CAP sockets input queue length"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_seq, OID_AUTO, queue_maxlen, CTLFLAG_RD, &ng_btsocket_l2cap_queue.maxlen, 0, "Bluetooth SEQPACKET L2CAP sockets input queue max. length"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_seq, OID_AUTO, queue_drops, CTLFLAG_RD, &ng_btsocket_l2cap_queue.drops, 0, "Bluetooth SEQPACKET L2CAP sockets input queue drops"); /* Debug */ #define NG_BTSOCKET_L2CAP_INFO \ if (ng_btsocket_l2cap_debug_level >= NG_BTSOCKET_INFO_LEVEL && \ ppsratecheck(&ng_btsocket_l2cap_lasttime, &ng_btsocket_l2cap_curpps, 1)) \ printf #define NG_BTSOCKET_L2CAP_WARN \ if (ng_btsocket_l2cap_debug_level >= NG_BTSOCKET_WARN_LEVEL && \ ppsratecheck(&ng_btsocket_l2cap_lasttime, &ng_btsocket_l2cap_curpps, 1)) \ printf #define NG_BTSOCKET_L2CAP_ERR \ if (ng_btsocket_l2cap_debug_level >= NG_BTSOCKET_ERR_LEVEL && \ ppsratecheck(&ng_btsocket_l2cap_lasttime, &ng_btsocket_l2cap_curpps, 1)) \ printf #define NG_BTSOCKET_L2CAP_ALERT \ if (ng_btsocket_l2cap_debug_level >= NG_BTSOCKET_ALERT_LEVEL && \ ppsratecheck(&ng_btsocket_l2cap_lasttime, &ng_btsocket_l2cap_curpps, 1)) \ printf /* * Netgraph message processing routines */ static int ng_btsocket_l2cap_process_l2ca_con_req_rsp (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); static int ng_btsocket_l2cap_process_l2ca_con_rsp_rsp (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); static int ng_btsocket_l2cap_process_l2ca_con_ind (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); static int ng_btsocket_l2cap_process_l2ca_cfg_req_rsp (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); static int ng_btsocket_l2cap_process_l2ca_cfg_rsp_rsp (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); static int ng_btsocket_l2cap_process_l2ca_cfg_ind (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); static int ng_btsocket_l2cap_process_l2ca_discon_rsp (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); static int ng_btsocket_l2cap_process_l2ca_discon_ind (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); static int ng_btsocket_l2cap_process_l2ca_write_rsp (struct ng_mesg *, ng_btsocket_l2cap_rtentry_p); /* * Send L2CA_xxx messages to the lower layer */ static int ng_btsocket_l2cap_send_l2ca_con_req (ng_btsocket_l2cap_pcb_p); static int ng_btsocket_l2cap_send_l2ca_con_rsp_req (u_int32_t, ng_btsocket_l2cap_rtentry_p, bdaddr_p, int, int, int, int); static int ng_btsocket_l2cap_send_l2ca_cfg_req (ng_btsocket_l2cap_pcb_p); static int ng_btsocket_l2cap_send_l2ca_cfg_rsp (ng_btsocket_l2cap_pcb_p); static int ng_btsocket_l2cap_send_l2ca_discon_req (u_int32_t, ng_btsocket_l2cap_pcb_p); static int ng_btsocket_l2cap_send2 (ng_btsocket_l2cap_pcb_p); /* * Timeout processing routines */ static void ng_btsocket_l2cap_timeout (ng_btsocket_l2cap_pcb_p); static void ng_btsocket_l2cap_untimeout (ng_btsocket_l2cap_pcb_p); static void ng_btsocket_l2cap_process_timeout (void *); /* * Other stuff */ static ng_btsocket_l2cap_pcb_p ng_btsocket_l2cap_pcb_by_addr(bdaddr_p, int); static ng_btsocket_l2cap_pcb_p ng_btsocket_l2cap_pcb_by_token(u_int32_t); static ng_btsocket_l2cap_pcb_p ng_btsocket_l2cap_pcb_by_cid (bdaddr_p, int,int); static int ng_btsocket_l2cap_result2errno(int); static int 
ng_btsock_l2cap_addrtype_to_linktype(int addrtype); #define ng_btsocket_l2cap_wakeup_input_task() \ taskqueue_enqueue(taskqueue_swi_giant, &ng_btsocket_l2cap_queue_task) #define ng_btsocket_l2cap_wakeup_route_task() \ taskqueue_enqueue(taskqueue_swi_giant, &ng_btsocket_l2cap_rt_task) int ng_btsock_l2cap_addrtype_to_linktype(int addrtype) { switch(addrtype){ case BDADDR_LE_PUBLIC: return NG_HCI_LINK_LE_PUBLIC; case BDADDR_LE_RANDOM: return NG_HCI_LINK_LE_RANDOM; default: return NG_HCI_LINK_ACL; } } /***************************************************************************** ***************************************************************************** ** Netgraph node interface ***************************************************************************** *****************************************************************************/ /* * Netgraph node constructor. Do not allow to create node of this type. */ static int ng_btsocket_l2cap_node_constructor(node_p node) { return (EINVAL); } /* ng_btsocket_l2cap_node_constructor */ /* * Do local shutdown processing. Let old node go and create new fresh one. */ static int ng_btsocket_l2cap_node_shutdown(node_p node) { int error = 0; NG_NODE_UNREF(node); /* Create new node */ error = ng_make_node_common(&typestruct, &ng_btsocket_l2cap_node); if (error != 0) { NG_BTSOCKET_L2CAP_ALERT( "%s: Could not create Netgraph node, error=%d\n", __func__, error); ng_btsocket_l2cap_node = NULL; return (error); } error = ng_name_node(ng_btsocket_l2cap_node, NG_BTSOCKET_L2CAP_NODE_TYPE); if (error != 0) { NG_BTSOCKET_L2CAP_ALERT( "%s: Could not name Netgraph node, error=%d\n", __func__, error); NG_NODE_UNREF(ng_btsocket_l2cap_node); ng_btsocket_l2cap_node = NULL; return (error); } return (0); } /* ng_btsocket_l2cap_node_shutdown */ /* * We allow any hook to be connected to the node. */ static int ng_btsocket_l2cap_node_newhook(node_p node, hook_p hook, char const *name) { return (0); } /* ng_btsocket_l2cap_node_newhook */ /* * Just say "YEP, that's OK by me!" */ static int ng_btsocket_l2cap_node_connect(hook_p hook) { NG_HOOK_SET_PRIVATE(hook, NULL); NG_HOOK_REF(hook); /* Keep extra reference to the hook */ #if 0 NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); NG_HOOK_FORCE_QUEUE(hook); #endif return (0); } /* ng_btsocket_l2cap_node_connect */ /* * Hook disconnection. Schedule route cleanup task */ static int ng_btsocket_l2cap_node_disconnect(hook_p hook) { /* * If hook has private information than we must have this hook in * the routing table and must schedule cleaning for the routing table. * Otherwise hook was connected but we never got "hook_info" message, * so we have never added this hook to the routing table and it save * to just delete it. 
*/ if (NG_HOOK_PRIVATE(hook) != NULL) return (ng_btsocket_l2cap_wakeup_route_task()); NG_HOOK_UNREF(hook); /* Remove extra reference */ return (0); } /* ng_btsocket_l2cap_node_disconnect */ /* * Process incoming messages */ static int ng_btsocket_l2cap_node_rcvmsg(node_p node, item_p item, hook_p hook) { struct ng_mesg *msg = NGI_MSG(item); /* item still has message */ int error = 0; if (msg != NULL && msg->header.typecookie == NGM_L2CAP_COOKIE) { mtx_lock(&ng_btsocket_l2cap_queue_mtx); if (NG_BT_ITEMQ_FULL(&ng_btsocket_l2cap_queue)) { NG_BTSOCKET_L2CAP_ERR( "%s: Input queue is full (msg)\n", __func__); NG_BT_ITEMQ_DROP(&ng_btsocket_l2cap_queue); NG_FREE_ITEM(item); error = ENOBUFS; } else { if (hook != NULL) { NG_HOOK_REF(hook); NGI_SET_HOOK(item, hook); } NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_l2cap_queue, item); error = ng_btsocket_l2cap_wakeup_input_task(); } mtx_unlock(&ng_btsocket_l2cap_queue_mtx); } else { NG_FREE_ITEM(item); error = EINVAL; } return (error); } /* ng_btsocket_l2cap_node_rcvmsg */ /* * Receive data on a hook */ static int ng_btsocket_l2cap_node_rcvdata(hook_p hook, item_p item) { int error = 0; mtx_lock(&ng_btsocket_l2cap_queue_mtx); if (NG_BT_ITEMQ_FULL(&ng_btsocket_l2cap_queue)) { NG_BTSOCKET_L2CAP_ERR( "%s: Input queue is full (data)\n", __func__); NG_BT_ITEMQ_DROP(&ng_btsocket_l2cap_queue); NG_FREE_ITEM(item); error = ENOBUFS; } else { NG_HOOK_REF(hook); NGI_SET_HOOK(item, hook); NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_l2cap_queue, item); error = ng_btsocket_l2cap_wakeup_input_task(); } mtx_unlock(&ng_btsocket_l2cap_queue_mtx); return (error); } /* ng_btsocket_l2cap_node_rcvdata */ /* * Process L2CA_Connect respose. Socket layer must have initiated connection, * so we have to have a socket associated with message token. */ static int ng_btsocket_l2cap_process_l2ca_con_req_rsp(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_con_op *op = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; int error = 0; if (msg->header.arglen != sizeof(*op)) return (EMSGSIZE); op = (ng_l2cap_l2ca_con_op *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* Look for the socket with the token */ pcb = ng_btsocket_l2cap_pcb_by_token(msg->header.token); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_Connect response, token=%d, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, result=%d, status=%d, " \ "state=%d\n", __func__, msg->header.token, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->psm, op->lcid, op->result, op->status, pcb->state); if (pcb->state != NG_BTSOCKET_L2CAP_CONNECTING) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } ng_btsocket_l2cap_untimeout(pcb); if (op->result == NG_L2CAP_PENDING) { ng_btsocket_l2cap_timeout(pcb); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } if (op->result == NG_L2CAP_SUCCESS){ if((pcb->idtype == NG_L2CAP_L2CA_IDTYPE_ATT)|| (pcb->idtype == NG_L2CAP_L2CA_IDTYPE_SMP)){ pcb->encryption = op->encryption; pcb->cid = op->lcid; if(pcb->need_encrypt && !(pcb->encryption)){ ng_btsocket_l2cap_timeout(pcb); pcb->state = NG_BTSOCKET_L2CAP_W4_ENC_CHANGE; }else{ pcb->state = NG_BTSOCKET_L2CAP_OPEN; soisconnected(pcb->so); } }else{ /* * Channel is now open, so update local channel ID and * start configuration 
process. Source and destination * addresses as well as route must be already set. */ pcb->cid = op->lcid; pcb->encryption = op->encryption; error = ng_btsocket_l2cap_send_l2ca_cfg_req(pcb); if (error != 0) { /* Send disconnect request with "zero" token */ ng_btsocket_l2cap_send_l2ca_discon_req(0, pcb); /* ... and close the socket */ pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); } else { pcb->cfg_state = NG_BTSOCKET_L2CAP_CFG_IN_SENT; pcb->state = NG_BTSOCKET_L2CAP_CONFIGURING; ng_btsocket_l2cap_timeout(pcb); } } } else { /* * We have failed to open connection, so convert result * code to "errno" code and disconnect the socket. Channel * already has been closed. */ pcb->so->so_error = ng_btsocket_l2cap_result2errno(op->result); pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (error); } /* ng_btsocket_l2cap_process_l2ca_con_req_rsp */ /* * Process L2CA_ConnectRsp response */ static int ng_btsocket_l2cap_process_l2ca_con_rsp_rsp(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_con_rsp_op *op = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; if (msg->header.arglen != sizeof(*op)) return (EMSGSIZE); op = (ng_l2cap_l2ca_con_rsp_op *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* Look for the socket with the token */ pcb = ng_btsocket_l2cap_pcb_by_token(msg->header.token); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_ConnectRsp response, token=%d, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, result=%d, state=%d\n", __func__, msg->header.token, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->psm, pcb->cid, op->result, pcb->state); if (pcb->state != NG_BTSOCKET_L2CAP_CONNECTING) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } ng_btsocket_l2cap_untimeout(pcb); /* Check the result and disconnect the socket on failure */ if (op->result != NG_L2CAP_SUCCESS) { /* Close the socket - channel already closed */ pcb->so->so_error = ng_btsocket_l2cap_result2errno(op->result); pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); } else { /* Move to CONFIGURING state and wait for CONFIG_IND */ pcb->cfg_state = 0; pcb->state = NG_BTSOCKET_L2CAP_CONFIGURING; ng_btsocket_l2cap_timeout(pcb); } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } /* ng_btsocket_process_l2ca_con_rsp_rsp */ /* * Process L2CA_Connect indicator. Find socket that listens on address * and PSM. Find exact or closest match. Create new socket and initiate * connection. 
*/ static int ng_btsocket_l2cap_process_l2ca_con_ind(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_con_ind_ip *ip = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL, *pcb1 = NULL; int error = 0; u_int32_t token = 0; u_int16_t result = 0; if (msg->header.arglen != sizeof(*ip)) return (EMSGSIZE); ip = (ng_l2cap_l2ca_con_ind_ip *)(msg->data); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_Connect indicator, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, ident=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], ip->bdaddr.b[5], ip->bdaddr.b[4], ip->bdaddr.b[3], ip->bdaddr.b[2], ip->bdaddr.b[1], ip->bdaddr.b[0], ip->psm, ip->lcid, ip->ident); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); pcb = ng_btsocket_l2cap_pcb_by_addr(&rt->src, ip->psm); if (pcb != NULL) { struct socket *so1; mtx_lock(&pcb->pcb_mtx); CURVNET_SET(pcb->so->so_vnet); so1 = sonewconn(pcb->so, 0); CURVNET_RESTORE(); if (so1 == NULL) { result = NG_L2CAP_NO_RESOURCES; goto respond; } /* * If we got here than we have created new socket. So complete * connection. If we we listening on specific address then copy * source address from listening socket, otherwise copy source * address from hook's routing information. */ pcb1 = so2l2cap_pcb(so1); KASSERT((pcb1 != NULL), ("%s: pcb1 == NULL\n", __func__)); mtx_lock(&pcb1->pcb_mtx); if (bcmp(&pcb->src, NG_HCI_BDADDR_ANY, sizeof(pcb->src)) != 0) bcopy(&pcb->src, &pcb1->src, sizeof(pcb1->src)); else bcopy(&rt->src, &pcb1->src, sizeof(pcb1->src)); pcb1->flags &= ~NG_BTSOCKET_L2CAP_CLIENT; bcopy(&ip->bdaddr, &pcb1->dst, sizeof(pcb1->dst)); pcb1->psm = ip->psm; pcb1->cid = ip->lcid; pcb1->rt = rt; /* Copy socket settings */ pcb1->imtu = pcb->imtu; bcopy(&pcb->oflow, &pcb1->oflow, sizeof(pcb1->oflow)); pcb1->flush_timo = pcb->flush_timo; token = pcb1->token; } else /* Nobody listens on requested BDADDR/PSM */ result = NG_L2CAP_PSM_NOT_SUPPORTED; respond: error = ng_btsocket_l2cap_send_l2ca_con_rsp_req(token, rt, &ip->bdaddr, ip->ident, ip->lcid, result,ip->linktype); if (pcb1 != NULL) { if (error != 0) { pcb1->so->so_error = error; pcb1->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb1->so); } else { pcb1->state = NG_BTSOCKET_L2CAP_CONNECTING; soisconnecting(pcb1->so); ng_btsocket_l2cap_timeout(pcb1); } mtx_unlock(&pcb1->pcb_mtx); } if (pcb != NULL) mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (error); } /* ng_btsocket_l2cap_process_l2ca_con_ind */ /*Encryption Change*/ static int ng_btsocket_l2cap_process_l2ca_enc_change(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_enc_chg_op *op = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; if (msg->header.arglen != sizeof(*op)) return (EMSGSIZE); op = (ng_l2cap_l2ca_enc_chg_op *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); pcb = ng_btsocket_l2cap_pcb_by_cid(&rt->src, op->lcid, op->idtype); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } mtx_lock(&pcb->pcb_mtx); pcb->encryption = op->result; if(pcb->need_encrypt){ ng_btsocket_l2cap_untimeout(pcb); if(pcb->state != NG_BTSOCKET_L2CAP_W4_ENC_CHANGE){ NG_BTSOCKET_L2CAP_WARN("%s: Invalid pcb status %d", __func__, pcb->state); }else if(pcb->encryption){ pcb->state = NG_BTSOCKET_L2CAP_OPEN; soisconnected(pcb->so); }else{ pcb->so->so_error = EPERM; ng_btsocket_l2cap_send_l2ca_discon_req(0, pcb); pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); } } mtx_unlock(&pcb->pcb_mtx); 
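/*
 * Summary of the encryption handshake implemented above for ATT/SMP
 * (LE) channels: when the socket requested encryption (need_encrypt)
 * but L2CA_Connect completed over a not-yet-encrypted link, the pcb is
 * parked in NG_BTSOCKET_L2CAP_W4_ENC_CHANGE with its timeout re-armed.
 * This L2CA_ENC_CHANGE handler then either promotes it to OPEN and
 * calls soisconnected(), or, if the link still is not encrypted, sets
 * so_error to EPERM, sends L2CA_Disconnect and closes the socket.
 */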
mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return 0; } /* * Process L2CA_Config response */ static int ng_btsocket_l2cap_process_l2ca_cfg_req_rsp(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_cfg_op *op = NULL; ng_btsocket_l2cap_pcb_p pcb = NULL; if (msg->header.arglen != sizeof(*op)) return (EMSGSIZE); op = (ng_l2cap_l2ca_cfg_op *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* * Socket must have issued a Configure request, so we must have a * socket that wants to be configured. Use Netgraph message token * to find it */ pcb = ng_btsocket_l2cap_pcb_by_token(msg->header.token); if (pcb == NULL) { /* * XXX FIXME what to do here? We could not find a * socket with requested token. We even can not send * Disconnect, because we do not know channel ID */ mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_Config response, token=%d, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, result=%d, state=%d, " \ "cfg_state=%x\n", __func__, msg->header.token, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->psm, pcb->cid, op->result, pcb->state, pcb->cfg_state); if (pcb->state != NG_BTSOCKET_L2CAP_CONFIGURING) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } if (op->result == NG_L2CAP_SUCCESS) { /* * XXX FIXME Actually set flush and link timeout. * Set QoS here if required. Resolve conficts (flush_timo). * Save incoming MTU (peer's outgoing MTU) and outgoing flow * spec. */ pcb->imtu = op->imtu; bcopy(&op->oflow, &pcb->oflow, sizeof(pcb->oflow)); pcb->flush_timo = op->flush_timo; /* * We have configured incoming side, so record it and check * if configuration is complete. If complete then mark socket * as connected, otherwise wait for the peer. */ pcb->cfg_state &= ~NG_BTSOCKET_L2CAP_CFG_IN_SENT; pcb->cfg_state |= NG_BTSOCKET_L2CAP_CFG_IN; if (pcb->cfg_state == NG_BTSOCKET_L2CAP_CFG_BOTH) { /* Configuration complete - mark socket as open */ ng_btsocket_l2cap_untimeout(pcb); pcb->state = NG_BTSOCKET_L2CAP_OPEN; soisconnected(pcb->so); } } else { /* * Something went wrong. Could be unacceptable parameters, * reject or unknown option. That's too bad, but we will * not negotiate. Send Disconnect and close the channel. */ ng_btsocket_l2cap_untimeout(pcb); switch (op->result) { case NG_L2CAP_UNACCEPTABLE_PARAMS: case NG_L2CAP_UNKNOWN_OPTION: pcb->so->so_error = EINVAL; break; default: pcb->so->so_error = ECONNRESET; break; } /* Send disconnect with "zero" token */ ng_btsocket_l2cap_send_l2ca_discon_req(0, pcb); /* ... 
and close the socket */ pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } /* ng_btsocket_l2cap_process_l2ca_cfg_req_rsp */ /* * Process L2CA_ConfigRsp response */ static int ng_btsocket_l2cap_process_l2ca_cfg_rsp_rsp(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_cfg_rsp_op *op = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; int error = 0; if (msg->header.arglen != sizeof(*op)) return (EMSGSIZE); op = (ng_l2cap_l2ca_cfg_rsp_op *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* Look for the socket with the token */ pcb = ng_btsocket_l2cap_pcb_by_token(msg->header.token); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_ConfigRsp response, token=%d, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, result=%d, state=%d, " \ "cfg_state=%x\n", __func__, msg->header.token, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->psm, pcb->cid, op->result, pcb->state, pcb->cfg_state); if (pcb->state != NG_BTSOCKET_L2CAP_CONFIGURING) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } /* Check the result and disconnect socket of failure */ if (op->result != NG_L2CAP_SUCCESS) goto disconnect; /* * Now we done with remote side configuration. Configure local * side if we have not done it yet. */ pcb->cfg_state &= ~NG_BTSOCKET_L2CAP_CFG_OUT_SENT; pcb->cfg_state |= NG_BTSOCKET_L2CAP_CFG_OUT; if (pcb->cfg_state == NG_BTSOCKET_L2CAP_CFG_BOTH) { /* Configuration complete - mask socket as open */ ng_btsocket_l2cap_untimeout(pcb); pcb->state = NG_BTSOCKET_L2CAP_OPEN; soisconnected(pcb->so); } else { if (!(pcb->cfg_state & NG_BTSOCKET_L2CAP_CFG_IN_SENT)) { /* Send L2CA_Config request - incoming path */ error = ng_btsocket_l2cap_send_l2ca_cfg_req(pcb); if (error != 0) goto disconnect; pcb->cfg_state |= NG_BTSOCKET_L2CAP_CFG_IN_SENT; } } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (error); disconnect: ng_btsocket_l2cap_untimeout(pcb); /* Send disconnect with "zero" token */ ng_btsocket_l2cap_send_l2ca_discon_req(0, pcb); /* ... 
and close the socket */ pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (error); } /* ng_btsocket_l2cap_process_l2ca_cfg_rsp_rsp */ /* * Process L2CA_Config indicator */ static int ng_btsocket_l2cap_process_l2ca_cfg_ind(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_cfg_ind_ip *ip = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; int error = 0; if (msg->header.arglen != sizeof(*ip)) return (EMSGSIZE); ip = (ng_l2cap_l2ca_cfg_ind_ip *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* Check for the open socket that has given channel ID */ pcb = ng_btsocket_l2cap_pcb_by_cid(&rt->src, ip->lcid, NG_L2CAP_L2CA_IDTYPE_BREDR); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_Config indicator, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, state=%d, cfg_state=%x\n", __func__, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->psm, pcb->cid, pcb->state, pcb->cfg_state); /* XXX FIXME re-configuration on open socket */ if (pcb->state != NG_BTSOCKET_L2CAP_CONFIGURING) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } /* * XXX FIXME Actually set flush and link timeout. Set QoS here if * required. Resolve conficts (flush_timo). Note outgoing MTU (peer's * incoming MTU) and incoming flow spec. */ pcb->omtu = ip->omtu; bcopy(&ip->iflow, &pcb->iflow, sizeof(pcb->iflow)); pcb->flush_timo = ip->flush_timo; /* * Send L2CA_Config response to our peer and check for the errors, * if any send disconnect to close the channel. */ if (!(pcb->cfg_state & NG_BTSOCKET_L2CAP_CFG_OUT_SENT)) { error = ng_btsocket_l2cap_send_l2ca_cfg_rsp(pcb); if (error != 0) { ng_btsocket_l2cap_untimeout(pcb); pcb->so->so_error = error; /* Send disconnect with "zero" token */ ng_btsocket_l2cap_send_l2ca_discon_req(0, pcb); /* ... and close the socket */ pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); } else pcb->cfg_state |= NG_BTSOCKET_L2CAP_CFG_OUT_SENT; } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (error); } /* ng_btsocket_l2cap_process_l2cap_cfg_ind */ /* * Process L2CA_Disconnect response */ static int ng_btsocket_l2cap_process_l2ca_discon_rsp(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_discon_op *op = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; /* Check message */ if (msg->header.arglen != sizeof(*op)) return (EMSGSIZE); op = (ng_l2cap_l2ca_discon_op *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* * Socket layer must have issued L2CA_Disconnect request, so there * must be a socket that wants to be disconnected. Use Netgraph * message token to find it. 
*/ pcb = ng_btsocket_l2cap_pcb_by_token(msg->header.token); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } mtx_lock(&pcb->pcb_mtx); /* XXX Close socket no matter what op->result says */ if (pcb->state != NG_BTSOCKET_L2CAP_CLOSED) { NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_Disconnect response, token=%d, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, result=%d, state=%d\n", __func__, msg->header.token, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->psm, pcb->cid, op->result, pcb->state); ng_btsocket_l2cap_untimeout(pcb); pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } /* ng_btsocket_l2cap_process_l2ca_discon_rsp */ /* * Process L2CA_Disconnect indicator */ static int ng_btsocket_l2cap_process_l2ca_discon_ind(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_discon_ind_ip *ip = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; /* Check message */ if (msg->header.arglen != sizeof(*ip)) return (EMSGSIZE); ip = (ng_l2cap_l2ca_discon_ind_ip *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* Look for the socket with given channel ID */ pcb = ng_btsocket_l2cap_pcb_by_cid(&rt->src, ip->lcid, ip->idtype); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } /* * Channel has already been destroyed, so disconnect the socket * and be done with it. If there was any pending request we can * not do anything here anyway. */ mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_Disconnect indicator, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, state=%d\n", __func__, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->psm, pcb->cid, pcb->state); if (pcb->flags & NG_BTSOCKET_L2CAP_TIMO) ng_btsocket_l2cap_untimeout(pcb); pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } /* ng_btsocket_l2cap_process_l2ca_discon_ind */ /* * Process L2CA_Write response */ static int ng_btsocket_l2cap_process_l2ca_write_rsp(struct ng_mesg *msg, ng_btsocket_l2cap_rtentry_p rt) { ng_l2cap_l2ca_write_op *op = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; /* Check message */ if (msg->header.arglen != sizeof(*op)) return (EMSGSIZE); op = (ng_l2cap_l2ca_write_op *)(msg->data); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* Look for the socket with given token */ pcb = ng_btsocket_l2cap_pcb_by_token(msg->header.token); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CA_Write response, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, lcid=%d, result=%d, length=%d, " \ "state=%d\n", __func__, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->psm, pcb->cid, op->result, op->length, pcb->state); if (pcb->state != NG_BTSOCKET_L2CAP_OPEN) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (ENOENT); } ng_btsocket_l2cap_untimeout(pcb); /* * Check if we have 
more data to send */ sbdroprecord(&pcb->so->so_snd); if (sbavail(&pcb->so->so_snd) > 0) { if (ng_btsocket_l2cap_send2(pcb) == 0) ng_btsocket_l2cap_timeout(pcb); else sbdroprecord(&pcb->so->so_snd); /* XXX */ } /* * Now set the result, drop packet from the socket send queue and * ask for more (wakeup sender) */ pcb->so->so_error = ng_btsocket_l2cap_result2errno(op->result); sowwakeup(pcb->so); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } /* ng_btsocket_l2cap_process_l2ca_write_rsp */ /* * Send L2CA_Connect request */ static int ng_btsocket_l2cap_send_l2ca_con_req(ng_btsocket_l2cap_pcb_p pcb) { struct ng_mesg *msg = NULL; ng_l2cap_l2ca_con_ip *ip = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->rt == NULL || pcb->rt->hook == NULL || NG_HOOK_NOT_VALID(pcb->rt->hook)) return (ENETDOWN); NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_L2CA_CON, sizeof(*ip), M_NOWAIT); if (msg == NULL) return (ENOMEM); msg->header.token = pcb->token; ip = (ng_l2cap_l2ca_con_ip *)(msg->data); bcopy(&pcb->dst, &ip->bdaddr, sizeof(ip->bdaddr)); ip->psm = pcb->psm; ip->linktype = ng_btsock_l2cap_addrtype_to_linktype(pcb->dsttype); ip->idtype = pcb->idtype; NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_node, msg,pcb->rt->hook, 0); return (error); } /* ng_btsocket_l2cap_send_l2ca_con_req */ /* * Send L2CA_Connect response */ static int ng_btsocket_l2cap_send_l2ca_con_rsp_req(u_int32_t token, ng_btsocket_l2cap_rtentry_p rt, bdaddr_p dst, int ident, int lcid, int result, int linktype) { struct ng_mesg *msg = NULL; ng_l2cap_l2ca_con_rsp_ip *ip = NULL; int error = 0; if (rt == NULL || rt->hook == NULL || NG_HOOK_NOT_VALID(rt->hook)) return (ENETDOWN); NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_L2CA_CON_RSP, sizeof(*ip), M_NOWAIT); if (msg == NULL) return (ENOMEM); msg->header.token = token; ip = (ng_l2cap_l2ca_con_rsp_ip *)(msg->data); bcopy(dst, &ip->bdaddr, sizeof(ip->bdaddr)); ip->ident = ident; ip->lcid = lcid; ip->linktype = linktype; ip->result = result; ip->status = 0; NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_node, msg, rt->hook, 0); return (error); } /* ng_btsocket_l2cap_send_l2ca_con_rsp_req */ /* * Send L2CA_Config request */ static int ng_btsocket_l2cap_send_l2ca_cfg_req(ng_btsocket_l2cap_pcb_p pcb) { struct ng_mesg *msg = NULL; ng_l2cap_l2ca_cfg_ip *ip = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->rt == NULL || pcb->rt->hook == NULL || NG_HOOK_NOT_VALID(pcb->rt->hook)) return (ENETDOWN); NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_L2CA_CFG, sizeof(*ip), M_NOWAIT); if (msg == NULL) return (ENOMEM); msg->header.token = pcb->token; ip = (ng_l2cap_l2ca_cfg_ip *)(msg->data); ip->lcid = pcb->cid; ip->imtu = pcb->imtu; bcopy(&pcb->oflow, &ip->oflow, sizeof(ip->oflow)); ip->flush_timo = pcb->flush_timo; ip->link_timo = pcb->link_timo; NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_node, msg,pcb->rt->hook, 0); return (error); } /* ng_btsocket_l2cap_send_l2ca_cfg_req */ /* * Send L2CA_Config response */ static int ng_btsocket_l2cap_send_l2ca_cfg_rsp(ng_btsocket_l2cap_pcb_p pcb) { struct ng_mesg *msg = NULL; ng_l2cap_l2ca_cfg_rsp_ip *ip = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->rt == NULL || pcb->rt->hook == NULL || NG_HOOK_NOT_VALID(pcb->rt->hook)) return (ENETDOWN); NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_L2CA_CFG_RSP, sizeof(*ip), M_NOWAIT); if (msg == NULL) return (ENOMEM); msg->header.token = pcb->token; ip = (ng_l2cap_l2ca_cfg_rsp_ip *)(msg->data); ip->lcid = pcb->cid; ip->omtu = pcb->omtu; 
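/*
 * For reference, the cfg_state bits driven by the configuration
 * handlers above work as follows: CFG_IN_SENT/CFG_OUT_SENT record that
 * our L2CA_Config request or response is outstanding, CFG_IN/CFG_OUT
 * record that the incoming and outgoing directions have been
 * configured, and only when both directions are done (CFG_BOTH) is the
 * timeout cancelled, the pcb moved to NG_BTSOCKET_L2CAP_OPEN and
 * soisconnected() called.
 */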
bcopy(&pcb->iflow, &ip->iflow, sizeof(ip->iflow)); NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_node, msg, pcb->rt->hook, 0); return (error); } /* ng_btsocket_l2cap_send_l2ca_cfg_rsp */ /* * Send L2CA_Disconnect request */ static int ng_btsocket_l2cap_send_l2ca_discon_req(u_int32_t token, ng_btsocket_l2cap_pcb_p pcb) { struct ng_mesg *msg = NULL; ng_l2cap_l2ca_discon_ip *ip = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->rt == NULL || pcb->rt->hook == NULL || NG_HOOK_NOT_VALID(pcb->rt->hook)) return (ENETDOWN); NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_L2CA_DISCON, sizeof(*ip), M_NOWAIT); if (msg == NULL) return (ENOMEM); msg->header.token = token; ip = (ng_l2cap_l2ca_discon_ip *)(msg->data); ip->lcid = pcb->cid; ip->idtype = pcb->idtype; NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_node, msg,pcb->rt->hook, 0); return (error); } /* ng_btsocket_l2cap_send_l2ca_discon_req */ /***************************************************************************** ***************************************************************************** ** Socket interface ***************************************************************************** *****************************************************************************/ /* * L2CAP sockets data input routine */ static void ng_btsocket_l2cap_data_input(struct mbuf *m, hook_p hook) { ng_l2cap_hdr_t *hdr = NULL; ng_l2cap_clt_hdr_t *clt_hdr = NULL; ng_btsocket_l2cap_pcb_t *pcb = NULL; ng_btsocket_l2cap_rtentry_t *rt = NULL; uint16_t idtype; if (hook == NULL) { NG_BTSOCKET_L2CAP_ALERT( "%s: Invalid source hook for L2CAP data packet\n", __func__); goto drop; } rt = (ng_btsocket_l2cap_rtentry_t *) NG_HOOK_PRIVATE(hook); if (rt == NULL) { NG_BTSOCKET_L2CAP_ALERT( "%s: Could not find out source bdaddr for L2CAP data packet\n", __func__); goto drop; } m = m_pullup(m, sizeof(uint16_t)); idtype = *mtod(m, uint16_t *); m_adj(m, sizeof(uint16_t)); /* Make sure we can access header */ if (m->m_pkthdr.len < sizeof(*hdr)) { NG_BTSOCKET_L2CAP_ERR( "%s: L2CAP data packet too small, len=%d\n", __func__, m->m_pkthdr.len); goto drop; } if (m->m_len < sizeof(*hdr)) { m = m_pullup(m, sizeof(*hdr)); if (m == NULL) goto drop; } /* Strip L2CAP packet header and verify packet length */ hdr = mtod(m, ng_l2cap_hdr_t *); m_adj(m, sizeof(*hdr)); if (hdr->length != m->m_pkthdr.len) { NG_BTSOCKET_L2CAP_ERR( "%s: Bad L2CAP data packet length, len=%d, length=%d\n", __func__, m->m_pkthdr.len, hdr->length); goto drop; } /* * Now process packet. Two cases: * * 1) Normal packet (cid != 2) then find connected socket and append * mbuf to the socket queue. Wakeup socket. * * 2) Broadcast packet (cid == 2) then find all sockets that connected * to the given PSM and have SO_BROADCAST bit set and append mbuf * to the socket queue. Wakeup socket. 
*/ NG_BTSOCKET_L2CAP_INFO( "%s: Received L2CAP data packet: src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dcid=%d, length=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], hdr->dcid, hdr->length); if ((hdr->dcid >= NG_L2CAP_FIRST_CID) || (idtype == NG_L2CAP_L2CA_IDTYPE_ATT)|| (idtype == NG_L2CAP_L2CA_IDTYPE_SMP) ){ mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* Normal packet: find connected socket */ pcb = ng_btsocket_l2cap_pcb_by_cid(&rt->src, hdr->dcid,idtype); if (pcb == NULL) { mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); goto drop; } mtx_lock(&pcb->pcb_mtx); if (pcb->state != NG_BTSOCKET_L2CAP_OPEN) { NG_BTSOCKET_L2CAP_ERR( "%s: No connected socket found, src bdaddr=%x:%x:%x:%x:%x:%x, dcid=%d, " \ "state=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], hdr->dcid, pcb->state); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); goto drop; } /* Check packet size against socket's incoming MTU */ if (hdr->length > pcb->imtu) { NG_BTSOCKET_L2CAP_ERR( "%s: L2CAP data packet too big, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dcid=%d, length=%d, imtu=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], hdr->dcid, hdr->length, pcb->imtu); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); goto drop; } /* Check if we have enough space in socket receive queue */ if (m->m_pkthdr.len > sbspace(&pcb->so->so_rcv)) { /* * This is really bad. Receive queue on socket does * not have enough space for the packet. We do not * have any other choice but drop the packet. L2CAP * does not provide any flow control. */ NG_BTSOCKET_L2CAP_ERR( "%s: Not enough space in socket receive queue. Dropping L2CAP data packet, " \ "src bdaddr=%x:%x:%x:%x:%x:%x, dcid=%d, len=%d, space=%ld\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], hdr->dcid, m->m_pkthdr.len, sbspace(&pcb->so->so_rcv)); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); goto drop; } /* Append packet to the socket receive queue and wakeup */ sbappendrecord(&pcb->so->so_rcv, m); m = NULL; sorwakeup(pcb->so); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); } else if (hdr->dcid == NG_L2CAP_CLT_CID) { /* Broadcast packet: give packet to all sockets */ /* Check packet size against connectionless MTU */ if (hdr->length > NG_L2CAP_MTU_DEFAULT) { NG_BTSOCKET_L2CAP_ERR( "%s: Connectionless L2CAP data packet too big, " \ "src bdaddr=%x:%x:%x:%x:%x:%x, length=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], hdr->length); goto drop; } /* Make sure we can access connectionless header */ if (m->m_pkthdr.len < sizeof(*clt_hdr)) { NG_BTSOCKET_L2CAP_ERR( "%s: Can not get L2CAP connectionless packet header, " \ "src bdaddr=%x:%x:%x:%x:%x:%x, length=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], hdr->length); goto drop; } if (m->m_len < sizeof(*clt_hdr)) { m = m_pullup(m, sizeof(*clt_hdr)); if (m == NULL) goto drop; } /* Strip connectionless header and deliver packet */ clt_hdr = mtod(m, ng_l2cap_clt_hdr_t *); m_adj(m, sizeof(*clt_hdr)); NG_BTSOCKET_L2CAP_INFO( "%s: Got L2CAP connectionless data packet, " \ "src bdaddr=%x:%x:%x:%x:%x:%x, psm=%d, length=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], clt_hdr->psm, hdr->length); 
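/*
 * For reference, the layout handled by this input routine is: a leading
 * uint16_t idtype (stripped first), then an ng_l2cap_hdr_t giving the
 * payload length and destination CID, then the payload itself.
 * Connection-oriented traffic (dcid >= NG_L2CAP_FIRST_CID, or ATT/SMP
 * idtype) is matched to a single open socket by (src bdaddr, cid,
 * idtype); connectionless traffic (dcid == NG_L2CAP_CLT_CID) carries an
 * extra ng_l2cap_clt_hdr_t with the PSM and is duplicated below to
 * every open socket bound to that PSM with SO_BROADCAST set.
 */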
mtx_lock(&ng_btsocket_l2cap_sockets_mtx); LIST_FOREACH(pcb, &ng_btsocket_l2cap_sockets, next) { struct mbuf *copy = NULL; mtx_lock(&pcb->pcb_mtx); if (bcmp(&rt->src, &pcb->src, sizeof(pcb->src)) != 0 || pcb->psm != clt_hdr->psm || pcb->state != NG_BTSOCKET_L2CAP_OPEN || (pcb->so->so_options & SO_BROADCAST) == 0 || m->m_pkthdr.len > sbspace(&pcb->so->so_rcv)) goto next; /* * Create a copy of the packet and append it to the * socket's queue. If m_dup() failed - no big deal * it is a broadcast traffic after all */ copy = m_dup(m, M_NOWAIT); if (copy != NULL) { sbappendrecord(&pcb->so->so_rcv, copy); sorwakeup(pcb->so); } next: mtx_unlock(&pcb->pcb_mtx); } mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); } drop: NG_FREE_M(m); /* checks for m != NULL */ } /* ng_btsocket_l2cap_data_input */ /* * L2CAP sockets default message input routine */ static void ng_btsocket_l2cap_default_msg_input(struct ng_mesg *msg, hook_p hook) { switch (msg->header.cmd) { case NGM_L2CAP_NODE_HOOK_INFO: { ng_btsocket_l2cap_rtentry_t *rt = NULL; ng_l2cap_node_hook_info_ep *ep = (ng_l2cap_node_hook_info_ep *)msg->data; if (hook == NULL || msg->header.arglen != sizeof(*ep)) break; if (bcmp(&ep->addr, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) break; mtx_lock(&ng_btsocket_l2cap_rt_mtx); rt = (ng_btsocket_l2cap_rtentry_t *) NG_HOOK_PRIVATE(hook); if (rt == NULL) { rt = malloc(sizeof(*rt), M_NETGRAPH_BTSOCKET_L2CAP, M_NOWAIT|M_ZERO); if (rt == NULL) { mtx_unlock(&ng_btsocket_l2cap_rt_mtx); break; } LIST_INSERT_HEAD(&ng_btsocket_l2cap_rt, rt, next); NG_HOOK_SET_PRIVATE(hook, rt); } bcopy(&ep->addr, &rt->src, sizeof(rt->src)); rt->hook = hook; mtx_unlock(&ng_btsocket_l2cap_rt_mtx); NG_BTSOCKET_L2CAP_INFO( "%s: Updating hook \"%s\", src bdaddr=%x:%x:%x:%x:%x:%x\n", __func__, NG_HOOK_NAME(hook), rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0]); } break; default: NG_BTSOCKET_L2CAP_WARN( "%s: Unknown message, cmd=%d\n", __func__, msg->header.cmd); break; } NG_FREE_MSG(msg); /* Checks for msg != NULL */ } /* ng_btsocket_l2cap_default_msg_input */ /* * L2CAP sockets L2CA message input routine */ static void ng_btsocket_l2cap_l2ca_msg_input(struct ng_mesg *msg, hook_p hook) { ng_btsocket_l2cap_rtentry_p rt = NULL; if (hook == NULL) { NG_BTSOCKET_L2CAP_ALERT( "%s: Invalid source hook for L2CA message\n", __func__); goto drop; } rt = (ng_btsocket_l2cap_rtentry_p) NG_HOOK_PRIVATE(hook); if (rt == NULL) { NG_BTSOCKET_L2CAP_ALERT( "%s: Could not find out source bdaddr for L2CA message\n", __func__); goto drop; } switch (msg->header.cmd) { case NGM_L2CAP_L2CA_CON: /* L2CA_Connect response */ ng_btsocket_l2cap_process_l2ca_con_req_rsp(msg, rt); break; case NGM_L2CAP_L2CA_CON_RSP: /* L2CA_ConnectRsp response */ ng_btsocket_l2cap_process_l2ca_con_rsp_rsp(msg, rt); break; case NGM_L2CAP_L2CA_CON_IND: /* L2CA_Connect indicator */ ng_btsocket_l2cap_process_l2ca_con_ind(msg, rt); break; case NGM_L2CAP_L2CA_CFG: /* L2CA_Config response */ ng_btsocket_l2cap_process_l2ca_cfg_req_rsp(msg, rt); break; case NGM_L2CAP_L2CA_CFG_RSP: /* L2CA_ConfigRsp response */ ng_btsocket_l2cap_process_l2ca_cfg_rsp_rsp(msg, rt); break; case NGM_L2CAP_L2CA_CFG_IND: /* L2CA_Config indicator */ ng_btsocket_l2cap_process_l2ca_cfg_ind(msg, rt); break; case NGM_L2CAP_L2CA_DISCON: /* L2CA_Disconnect response */ ng_btsocket_l2cap_process_l2ca_discon_rsp(msg, rt); break; case NGM_L2CAP_L2CA_DISCON_IND: /* L2CA_Disconnect indicator */ ng_btsocket_l2cap_process_l2ca_discon_ind(msg, rt); break; case NGM_L2CAP_L2CA_WRITE: /* L2CA_Write response */ 
ng_btsocket_l2cap_process_l2ca_write_rsp(msg, rt); break; case NGM_L2CAP_L2CA_ENC_CHANGE: ng_btsocket_l2cap_process_l2ca_enc_change(msg, rt); break; /* XXX FIXME add other L2CA messages */ default: NG_BTSOCKET_L2CAP_WARN( "%s: Unknown L2CA message, cmd=%d\n", __func__, msg->header.cmd); break; } drop: NG_FREE_MSG(msg); } /* ng_btsocket_l2cap_l2ca_msg_input */ /* * L2CAP sockets input routine */ static void ng_btsocket_l2cap_input(void *context, int pending) { item_p item = NULL; hook_p hook = NULL; for (;;) { mtx_lock(&ng_btsocket_l2cap_queue_mtx); NG_BT_ITEMQ_DEQUEUE(&ng_btsocket_l2cap_queue, item); mtx_unlock(&ng_btsocket_l2cap_queue_mtx); if (item == NULL) break; NGI_GET_HOOK(item, hook); if (hook != NULL && NG_HOOK_NOT_VALID(hook)) goto drop; switch(item->el_flags & NGQF_TYPE) { case NGQF_DATA: { struct mbuf *m = NULL; NGI_GET_M(item, m); ng_btsocket_l2cap_data_input(m, hook); } break; case NGQF_MESG: { struct ng_mesg *msg = NULL; NGI_GET_MSG(item, msg); switch (msg->header.cmd) { case NGM_L2CAP_L2CA_CON: case NGM_L2CAP_L2CA_CON_RSP: case NGM_L2CAP_L2CA_CON_IND: case NGM_L2CAP_L2CA_CFG: case NGM_L2CAP_L2CA_CFG_RSP: case NGM_L2CAP_L2CA_CFG_IND: case NGM_L2CAP_L2CA_DISCON: case NGM_L2CAP_L2CA_DISCON_IND: case NGM_L2CAP_L2CA_WRITE: case NGM_L2CAP_L2CA_ENC_CHANGE: /* XXX FIXME add other L2CA messages */ ng_btsocket_l2cap_l2ca_msg_input(msg, hook); break; default: ng_btsocket_l2cap_default_msg_input(msg, hook); break; } } break; default: KASSERT(0, ("%s: invalid item type=%ld\n", __func__, (item->el_flags & NGQF_TYPE))); break; } drop: if (hook != NULL) NG_HOOK_UNREF(hook); NG_FREE_ITEM(item); } } /* ng_btsocket_l2cap_input */ /* * Route cleanup task. Gets scheduled when hook is disconnected. Here we * will find all sockets that use "invalid" hook and disconnect them. 
*/ static void ng_btsocket_l2cap_rtclean(void *context, int pending) { ng_btsocket_l2cap_pcb_p pcb = NULL, pcb_next = NULL; ng_btsocket_l2cap_rtentry_p rt = NULL; mtx_lock(&ng_btsocket_l2cap_rt_mtx); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); /* * First disconnect all sockets that use "invalid" hook */ for (pcb = LIST_FIRST(&ng_btsocket_l2cap_sockets); pcb != NULL; ) { mtx_lock(&pcb->pcb_mtx); pcb_next = LIST_NEXT(pcb, next); if (pcb->rt != NULL && pcb->rt->hook != NULL && NG_HOOK_NOT_VALID(pcb->rt->hook)) { if (pcb->flags & NG_BTSOCKET_L2CAP_TIMO) ng_btsocket_l2cap_untimeout(pcb); pcb->so->so_error = ENETDOWN; pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); pcb->token = 0; pcb->cid = 0; pcb->rt = NULL; } mtx_unlock(&pcb->pcb_mtx); pcb = pcb_next; } /* * Now cleanup routing table */ for (rt = LIST_FIRST(&ng_btsocket_l2cap_rt); rt != NULL; ) { ng_btsocket_l2cap_rtentry_p rt_next = LIST_NEXT(rt, next); if (rt->hook != NULL && NG_HOOK_NOT_VALID(rt->hook)) { LIST_REMOVE(rt, next); NG_HOOK_SET_PRIVATE(rt->hook, NULL); NG_HOOK_UNREF(rt->hook); /* Remove extra reference */ bzero(rt, sizeof(*rt)); free(rt, M_NETGRAPH_BTSOCKET_L2CAP); } rt = rt_next; } mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); mtx_unlock(&ng_btsocket_l2cap_rt_mtx); } /* ng_btsocket_l2cap_rtclean */ /* * Initialize everything */ static void ng_btsocket_l2cap_init(void *arg __unused) { int error = 0; ng_btsocket_l2cap_node = NULL; ng_btsocket_l2cap_debug_level = NG_BTSOCKET_WARN_LEVEL; /* Register Netgraph node type */ error = ng_newtype(&typestruct); if (error != 0) { NG_BTSOCKET_L2CAP_ALERT( "%s: Could not register Netgraph node type, error=%d\n", __func__, error); return; } /* Create Netgrapg node */ error = ng_make_node_common(&typestruct, &ng_btsocket_l2cap_node); if (error != 0) { NG_BTSOCKET_L2CAP_ALERT( "%s: Could not create Netgraph node, error=%d\n", __func__, error); ng_btsocket_l2cap_node = NULL; return; } error = ng_name_node(ng_btsocket_l2cap_node, NG_BTSOCKET_L2CAP_NODE_TYPE); if (error != 0) { NG_BTSOCKET_L2CAP_ALERT( "%s: Could not name Netgraph node, error=%d\n", __func__, error); NG_NODE_UNREF(ng_btsocket_l2cap_node); ng_btsocket_l2cap_node = NULL; return; } /* Create input queue */ NG_BT_ITEMQ_INIT(&ng_btsocket_l2cap_queue, ifqmaxlen); mtx_init(&ng_btsocket_l2cap_queue_mtx, "btsocks_l2cap_queue_mtx", NULL, MTX_DEF); TASK_INIT(&ng_btsocket_l2cap_queue_task, 0, ng_btsocket_l2cap_input, NULL); /* Create list of sockets */ LIST_INIT(&ng_btsocket_l2cap_sockets); mtx_init(&ng_btsocket_l2cap_sockets_mtx, "btsocks_l2cap_sockets_mtx", NULL, MTX_DEF); /* Routing table */ LIST_INIT(&ng_btsocket_l2cap_rt); mtx_init(&ng_btsocket_l2cap_rt_mtx, "btsocks_l2cap_rt_mtx", NULL, MTX_DEF); TASK_INIT(&ng_btsocket_l2cap_rt_task, 0, ng_btsocket_l2cap_rtclean, NULL); } /* ng_btsocket_l2cap_init */ SYSINIT(ng_btsocket_l2cap_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ng_btsocket_l2cap_init, NULL); /* * Abort connection on socket */ void ng_btsocket_l2cap_abort(struct socket *so) { so->so_error = ECONNABORTED; (void)ng_btsocket_l2cap_disconnect(so); } /* ng_btsocket_l2cap_abort */ void ng_btsocket_l2cap_close(struct socket *so) { (void)ng_btsocket_l2cap_disconnect(so); } /* ng_btsocket_l2cap_close */ /* * Create and attach new socket */ int ng_btsocket_l2cap_attach(struct socket *so, int proto, struct thread *td) { static u_int32_t token = 0; ng_btsocket_l2cap_pcb_p pcb = so2l2cap_pcb(so); int error; /* Check socket and protocol */ if (ng_btsocket_l2cap_node == NULL) return (EPROTONOSUPPORT); if (so->so_type != 
SOCK_SEQPACKET) return (ESOCKTNOSUPPORT); #if 0 /* XXX sonewconn() calls "pru_attach" with proto == 0 */ if (proto != 0) if (proto != BLUETOOTH_PROTO_L2CAP) return (EPROTONOSUPPORT); #endif /* XXX */ if (pcb != NULL) return (EISCONN); /* Reserve send and receive space if it is not reserved yet */ if ((so->so_snd.sb_hiwat == 0) || (so->so_rcv.sb_hiwat == 0)) { error = soreserve(so, NG_BTSOCKET_L2CAP_SENDSPACE, NG_BTSOCKET_L2CAP_RECVSPACE); if (error != 0) return (error); } /* Allocate the PCB */ pcb = malloc(sizeof(*pcb), M_NETGRAPH_BTSOCKET_L2CAP, M_NOWAIT | M_ZERO); if (pcb == NULL) return (ENOMEM); /* Link the PCB and the socket */ so->so_pcb = (caddr_t) pcb; pcb->so = so; pcb->state = NG_BTSOCKET_L2CAP_CLOSED; /* Initialize PCB */ pcb->imtu = pcb->omtu = NG_L2CAP_MTU_DEFAULT; /* Default flow */ pcb->iflow.flags = 0x0; pcb->iflow.service_type = NG_HCI_SERVICE_TYPE_BEST_EFFORT; pcb->iflow.token_rate = 0xffffffff; /* maximum */ pcb->iflow.token_bucket_size = 0xffffffff; /* maximum */ pcb->iflow.peak_bandwidth = 0x00000000; /* maximum */ pcb->iflow.latency = 0xffffffff; /* don't care */ pcb->iflow.delay_variation = 0xffffffff; /* don't care */ bcopy(&pcb->iflow, &pcb->oflow, sizeof(pcb->oflow)); pcb->flush_timo = NG_L2CAP_FLUSH_TIMO_DEFAULT; pcb->link_timo = NG_L2CAP_LINK_TIMO_DEFAULT; /* * XXX Mark PCB mutex as DUPOK to prevent "duplicated lock of * the same type" message. When accepting new L2CAP connection * ng_btsocket_l2cap_process_l2ca_con_ind() holds both PCB mutexes * for "old" (accepting) PCB and "new" (created) PCB. */ mtx_init(&pcb->pcb_mtx, "btsocks_l2cap_pcb_mtx", NULL, MTX_DEF|MTX_DUPOK); callout_init_mtx(&pcb->timo, &pcb->pcb_mtx, 0); /* * Add the PCB to the list * * XXX FIXME VERY IMPORTANT! * * This is totally FUBAR. We could get here in two cases: * * 1) When user calls socket() * 2) When we need to accept new incoming connection and call * sonewconn() * * In the first case we must acquire ng_btsocket_l2cap_sockets_mtx. * In the second case we hold ng_btsocket_l2cap_sockets_mtx already. * So we now need to distinguish between these cases. From reading * /sys/kern/uipc_socket.c we can find out that sonewconn() calls * pru_attach with proto == 0 and td == NULL. For now use this fact * to figure out if we were called from socket() or from sonewconn(). */ if (td != NULL) mtx_lock(&ng_btsocket_l2cap_sockets_mtx); else mtx_assert(&ng_btsocket_l2cap_sockets_mtx, MA_OWNED); /* Set PCB token. Use ng_btsocket_l2cap_sockets_mtx for protection */ if (++ token == 0) token ++; pcb->token = token; LIST_INSERT_HEAD(&ng_btsocket_l2cap_sockets, pcb, next); if (td != NULL) mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (0); } /* ng_btsocket_l2cap_attach */ /* * Bind socket */ int ng_btsocket_l2cap_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_l2cap_pcb_t *pcb = NULL; struct sockaddr_l2cap *sa = (struct sockaddr_l2cap *) nam; int psm, error = 0; if (ng_btsocket_l2cap_node == NULL) return (EINVAL); /* Verify address */ if (sa == NULL) return (EINVAL); if (sa->l2cap_family != AF_BLUETOOTH) return (EAFNOSUPPORT); /*For the time being, Not support LE binding.*/ if ((sa->l2cap_len != sizeof(*sa))&& (sa->l2cap_len != sizeof(struct sockaddr_l2cap_compat))) return (EINVAL); psm = le16toh(sa->l2cap_psm); /* * Check if other socket has this address already (look for exact * match PSM and bdaddr) and assign socket address if it's available. 
* * Note: socket can be bound to ANY PSM (zero) thus allowing several * channels with the same PSM between the same pair of BD_ADDR'es. */ mtx_lock(&ng_btsocket_l2cap_sockets_mtx); LIST_FOREACH(pcb, &ng_btsocket_l2cap_sockets, next) if (psm != 0 && psm == pcb->psm && bcmp(&pcb->src, &sa->l2cap_bdaddr, sizeof(bdaddr_t)) == 0) break; if (pcb == NULL) { /* Set socket address */ pcb = so2l2cap_pcb(so); if (pcb != NULL) { bcopy(&sa->l2cap_bdaddr, &pcb->src, sizeof(pcb->src)); pcb->psm = psm; } else error = EINVAL; } else error = EADDRINUSE; mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); return (error); } /* ng_btsocket_l2cap_bind */ /* * Connect socket */ int ng_btsocket_l2cap_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_l2cap_pcb_t *pcb = so2l2cap_pcb(so); struct sockaddr_l2cap_compat *sal = (struct sockaddr_l2cap_compat *) nam; struct sockaddr_l2cap *sa = (struct sockaddr_l2cap *)nam; struct sockaddr_l2cap ba; ng_btsocket_l2cap_rtentry_t *rt = NULL; int have_src, error = 0; int idtype = NG_L2CAP_L2CA_IDTYPE_BREDR; /* Check socket */ if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_node == NULL) return (EINVAL); if (pcb->state == NG_BTSOCKET_L2CAP_CONNECTING) return (EINPROGRESS); /* Verify address */ if (sa == NULL) return (EINVAL); if (sa->l2cap_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if (sa->l2cap_len == sizeof(*sal)){ bcopy(sal, &ba, sizeof(*sal)); sa = &ba; sa->l2cap_len = sizeof(*sa); sa->l2cap_bdaddr_type = BDADDR_BREDR; } if (sa->l2cap_len != sizeof(*sa)) return (EINVAL); if ((sa->l2cap_psm && sa->l2cap_cid)) return EINVAL; if (bcmp(&sa->l2cap_bdaddr, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) return (EDESTADDRREQ); if((sa->l2cap_bdaddr_type == BDADDR_BREDR)&& (sa->l2cap_psm == 0)) return EDESTADDRREQ; if(sa->l2cap_bdaddr_type != BDADDR_BREDR){ if(sa->l2cap_cid == NG_L2CAP_ATT_CID){ idtype = NG_L2CAP_L2CA_IDTYPE_ATT; }else if (sa->l2cap_cid == NG_L2CAP_SMP_CID){ idtype =NG_L2CAP_L2CA_IDTYPE_SMP; }else{ //if cid == 0 idtype = NG_L2CAP_L2CA_IDTYPE_LE; // Not supported yet return EINVAL; } } if (pcb->psm != 0 && pcb->psm != le16toh(sa->l2cap_psm)) return (EINVAL); /* * Routing. Socket should be bound to some source address. The source * address can be ANY. Destination address must be set and it must not * be ANY. If source address is ANY then find first rtentry that has * src != dst. */ mtx_lock(&ng_btsocket_l2cap_rt_mtx); mtx_lock(&ng_btsocket_l2cap_sockets_mtx); mtx_lock(&pcb->pcb_mtx); /* Send destination address and PSM */ bcopy(&sa->l2cap_bdaddr, &pcb->dst, sizeof(pcb->dst)); pcb->psm = le16toh(sa->l2cap_psm); pcb->dsttype = sa->l2cap_bdaddr_type; pcb->cid = 0; pcb->idtype = idtype; pcb->rt = NULL; have_src = bcmp(&pcb->src, NG_HCI_BDADDR_ANY, sizeof(pcb->src)); LIST_FOREACH(rt, &ng_btsocket_l2cap_rt, next) { if (rt->hook == NULL || NG_HOOK_NOT_VALID(rt->hook)) continue; /* Match src and dst */ if (have_src) { if (bcmp(&pcb->src, &rt->src, sizeof(rt->src)) == 0) break; } else { if (bcmp(&pcb->dst, &rt->src, sizeof(rt->src)) != 0) break; } } if (rt != NULL) { pcb->rt = rt; if (!have_src){ bcopy(&rt->src, &pcb->src, sizeof(pcb->src)); pcb->srctype = (sa->l2cap_bdaddr_type == BDADDR_BREDR)? 
BDADDR_BREDR : BDADDR_LE_PUBLIC; } } else error = EHOSTUNREACH; /* * Send L2CA_Connect request */ if (error == 0) { error = ng_btsocket_l2cap_send_l2ca_con_req(pcb); if (error == 0) { pcb->flags |= NG_BTSOCKET_L2CAP_CLIENT; pcb->state = NG_BTSOCKET_L2CAP_CONNECTING; soisconnecting(pcb->so); ng_btsocket_l2cap_timeout(pcb); } } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); mtx_unlock(&ng_btsocket_l2cap_rt_mtx); return (error); } /* ng_btsocket_l2cap_connect */ /* * Process ioctl's calls on socket */ int ng_btsocket_l2cap_control(struct socket *so, u_long cmd, void *data, struct ifnet *ifp, struct thread *td) { return (EINVAL); } /* ng_btsocket_l2cap_control */ /* * Process getsockopt/setsockopt system calls */ int ng_btsocket_l2cap_ctloutput(struct socket *so, struct sockopt *sopt) { ng_btsocket_l2cap_pcb_p pcb = so2l2cap_pcb(so); int error = 0; ng_l2cap_cfg_opt_val_t v; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_node == NULL) return (EINVAL); if (sopt->sopt_level != SOL_L2CAP) return (0); mtx_lock(&pcb->pcb_mtx); switch (sopt->sopt_dir) { case SOPT_GET: switch (sopt->sopt_name) { case SO_L2CAP_IMTU: /* get incoming MTU */ error = sooptcopyout(sopt, &pcb->imtu, sizeof(pcb->imtu)); break; case SO_L2CAP_OMTU: /* get outgoing (peer incoming) MTU */ error = sooptcopyout(sopt, &pcb->omtu, sizeof(pcb->omtu)); break; case SO_L2CAP_IFLOW: /* get incoming flow spec. */ error = sooptcopyout(sopt, &pcb->iflow, sizeof(pcb->iflow)); break; case SO_L2CAP_OFLOW: /* get outgoing flow spec. */ error = sooptcopyout(sopt, &pcb->oflow, sizeof(pcb->oflow)); break; case SO_L2CAP_FLUSH: /* get flush timeout */ error = sooptcopyout(sopt, &pcb->flush_timo, sizeof(pcb->flush_timo)); break; case SO_L2CAP_ENCRYPTED: /* get encrypt required */ error = sooptcopyout(sopt, &pcb->need_encrypt, sizeof(pcb->need_encrypt)); break; default: error = ENOPROTOOPT; break; } break; case SOPT_SET: /* * XXX * We do not allow to change these parameters while socket is * connected or we are in the process of creating a connection. * May be this should indicate re-configuration of the open * channel? */ if (pcb->state != NG_BTSOCKET_L2CAP_CLOSED) { error = EACCES; break; } switch (sopt->sopt_name) { case SO_L2CAP_IMTU: /* set incoming MTU */ error = sooptcopyin(sopt, &v, sizeof(v), sizeof(v.mtu)); if (error == 0) pcb->imtu = v.mtu; break; case SO_L2CAP_OFLOW: /* set outgoing flow spec. */ error = sooptcopyin(sopt, &v, sizeof(v),sizeof(v.flow)); if (error == 0) bcopy(&v.flow, &pcb->oflow, sizeof(pcb->oflow)); break; case SO_L2CAP_FLUSH: /* set flush timeout */ error = sooptcopyin(sopt, &v, sizeof(v), sizeof(v.flush_timo)); if (error == 0) pcb->flush_timo = v.flush_timo; break; case SO_L2CAP_ENCRYPTED: /*set connect encryption opt*/ if((pcb->state != NG_BTSOCKET_L2CAP_OPEN) && (pcb->state != NG_BTSOCKET_L2CAP_W4_ENC_CHANGE)){ error = sooptcopyin(sopt, &v, sizeof(v), sizeof(v.encryption)); if(error == 0) pcb->need_encrypt = (v.encryption)?1:0; }else{ error = EINVAL; } break; default: error = ENOPROTOOPT; break; } break; default: error = EINVAL; break; } mtx_unlock(&pcb->pcb_mtx); return (error); } /* ng_btsocket_l2cap_ctloutput */ /* * Detach and destroy socket */ void ng_btsocket_l2cap_detach(struct socket *so) { ng_btsocket_l2cap_pcb_p pcb = so2l2cap_pcb(so); KASSERT(pcb != NULL, ("ng_btsocket_l2cap_detach: pcb == NULL")); if (ng_btsocket_l2cap_node == NULL) return; mtx_lock(&ng_btsocket_l2cap_sockets_mtx); mtx_lock(&pcb->pcb_mtx); /* XXX what to do with pending request? 
*/ if (pcb->flags & NG_BTSOCKET_L2CAP_TIMO) ng_btsocket_l2cap_untimeout(pcb); if (pcb->state != NG_BTSOCKET_L2CAP_CLOSED && pcb->state != NG_BTSOCKET_L2CAP_DISCONNECTING) /* Send disconnect request with "zero" token */ ng_btsocket_l2cap_send_l2ca_discon_req(0, pcb); pcb->state = NG_BTSOCKET_L2CAP_CLOSED; LIST_REMOVE(pcb, next); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_sockets_mtx); mtx_destroy(&pcb->pcb_mtx); bzero(pcb, sizeof(*pcb)); free(pcb, M_NETGRAPH_BTSOCKET_L2CAP); soisdisconnected(so); so->so_pcb = NULL; } /* ng_btsocket_l2cap_detach */ /* * Disconnect socket */ int ng_btsocket_l2cap_disconnect(struct socket *so) { ng_btsocket_l2cap_pcb_p pcb = so2l2cap_pcb(so); int error = 0; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_node == NULL) return (EINVAL); mtx_lock(&pcb->pcb_mtx); if (pcb->state == NG_BTSOCKET_L2CAP_DISCONNECTING) { mtx_unlock(&pcb->pcb_mtx); return (EINPROGRESS); } if (pcb->state != NG_BTSOCKET_L2CAP_CLOSED) { /* XXX FIXME what to do with pending request? */ if (pcb->flags & NG_BTSOCKET_L2CAP_TIMO) ng_btsocket_l2cap_untimeout(pcb); error = ng_btsocket_l2cap_send_l2ca_discon_req(pcb->token, pcb); if (error == 0) { pcb->state = NG_BTSOCKET_L2CAP_DISCONNECTING; soisdisconnecting(so); ng_btsocket_l2cap_timeout(pcb); } /* XXX FIXME what to do if error != 0 */ } mtx_unlock(&pcb->pcb_mtx); return (error); } /* ng_btsocket_l2cap_disconnect */ /* * Listen on socket */ int ng_btsocket_l2cap_listen(struct socket *so, int backlog, struct thread *td) { ng_btsocket_l2cap_pcb_p pcb = so2l2cap_pcb(so); int error; SOCK_LOCK(so); error = solisten_proto_check(so); if (error != 0) goto out; if (pcb == NULL) { solisten_proto_abort(so); error = EINVAL; goto out; } if (ng_btsocket_l2cap_node == NULL) { solisten_proto_abort(so); error = EINVAL; goto out; } if (pcb->psm == 0) { solisten_proto_abort(so); error = EADDRNOTAVAIL; goto out; } solisten_proto(so, backlog); out: SOCK_UNLOCK(so); return (error); } /* ng_btsocket_listen */ -static int -ng_btsocket_l2cap_peeraddr1(struct socket *so, struct sockaddr_l2cap *sa) +/* + * Return peer address for getpeername(2) or for accept(2). For the latter + * case no extra work to do here, socket must be connected and ready. + */ +int +ng_btsocket_l2cap_peeraddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_l2cap_pcb_p pcb = so2l2cap_pcb(so); + struct sockaddr_l2cap *l2cap = (struct sockaddr_l2cap *)sa; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_node == NULL) return (EINVAL); - *sa = (struct sockaddr_l2cap ){ + *l2cap = (struct sockaddr_l2cap ){ .l2cap_len = sizeof(struct sockaddr_l2cap), .l2cap_family = AF_BLUETOOTH, .l2cap_psm = htole16(pcb->psm), }; - bcopy(&pcb->dst, &sa->l2cap_bdaddr, sizeof(sa->l2cap_bdaddr)); + bcopy(&pcb->dst, &l2cap->l2cap_bdaddr, sizeof(l2cap->l2cap_bdaddr)); switch(pcb->idtype){ case NG_L2CAP_L2CA_IDTYPE_ATT: - sa->l2cap_cid = NG_L2CAP_ATT_CID; + l2cap->l2cap_cid = NG_L2CAP_ATT_CID; break; case NG_L2CAP_L2CA_IDTYPE_SMP: - sa->l2cap_cid = NG_L2CAP_SMP_CID; + l2cap->l2cap_cid = NG_L2CAP_SMP_CID; break; default: - sa->l2cap_cid = 0; + l2cap->l2cap_cid = 0; break; } - sa->l2cap_bdaddr_type = pcb->dsttype; + l2cap->l2cap_bdaddr_type = pcb->dsttype; return (0); } -/* - * Get peer address - */ -int -ng_btsocket_l2cap_peeraddr(struct socket *so, struct sockaddr **nam) -{ - struct sockaddr_l2cap sa; - int error; - - error = ng_btsocket_l2cap_peeraddr1(so, &sa); - if (error != 0) - return (error); - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); - - return ((*nam == NULL)? 
ENOMEM : 0); -} - -/* - * Accept connection on socket. Nothing to do here, socket must be connected - * and ready, so just return peer address and be done with it. - */ -int -ng_btsocket_l2cap_accept(struct socket *so, struct sockaddr *sa) -{ - if (ng_btsocket_l2cap_node == NULL) - return (EINVAL); - - return (ng_btsocket_l2cap_peeraddr1(so, (struct sockaddr_l2cap *)sa)); -} - /* * Send data to socket */ int ng_btsocket_l2cap_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { ng_btsocket_l2cap_pcb_t *pcb = so2l2cap_pcb(so); int error = 0; if (ng_btsocket_l2cap_node == NULL) { error = ENETDOWN; goto drop; } /* Check socket and input */ if (pcb == NULL || m == NULL || control != NULL) { error = EINVAL; goto drop; } mtx_lock(&pcb->pcb_mtx); /* Make sure socket is connected */ if (pcb->state != NG_BTSOCKET_L2CAP_OPEN) { mtx_unlock(&pcb->pcb_mtx); error = ENOTCONN; goto drop; } /* Check route */ if (pcb->rt == NULL || pcb->rt->hook == NULL || NG_HOOK_NOT_VALID(pcb->rt->hook)) { mtx_unlock(&pcb->pcb_mtx); error = ENETDOWN; goto drop; } /* Check packet size against outgoing (peer's incoming) MTU) */ if (m->m_pkthdr.len > pcb->omtu) { NG_BTSOCKET_L2CAP_ERR( "%s: Packet too big, len=%d, omtu=%d\n", __func__, m->m_pkthdr.len, pcb->omtu); mtx_unlock(&pcb->pcb_mtx); error = EMSGSIZE; goto drop; } /* * First put packet on socket send queue. Then check if we have * pending timeout. If we do not have timeout then we must send * packet and schedule timeout. Otherwise do nothing and wait for * L2CA_WRITE_RSP. */ sbappendrecord(&pcb->so->so_snd, m); m = NULL; if (!(pcb->flags & NG_BTSOCKET_L2CAP_TIMO)) { error = ng_btsocket_l2cap_send2(pcb); if (error == 0) ng_btsocket_l2cap_timeout(pcb); else sbdroprecord(&pcb->so->so_snd); /* XXX */ } mtx_unlock(&pcb->pcb_mtx); drop: NG_FREE_M(m); /* checks for != NULL */ NG_FREE_M(control); return (error); } /* ng_btsocket_l2cap_send */ /* * Send first packet in the socket queue to the L2CAP layer */ static int ng_btsocket_l2cap_send2(ng_btsocket_l2cap_pcb_p pcb) { struct mbuf *m = NULL; ng_l2cap_l2ca_hdr_t *hdr = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (sbavail(&pcb->so->so_snd) == 0) return (EINVAL); /* XXX */ m = m_dup(pcb->so->so_snd.sb_mb, M_NOWAIT); if (m == NULL) return (ENOBUFS); /* Create L2CA packet header */ M_PREPEND(m, sizeof(*hdr), M_NOWAIT); if (m != NULL) if (m->m_len < sizeof(*hdr)) m = m_pullup(m, sizeof(*hdr)); if (m == NULL) { NG_BTSOCKET_L2CAP_ERR( "%s: Failed to create L2CA packet header\n", __func__); return (ENOBUFS); } hdr = mtod(m, ng_l2cap_l2ca_hdr_t *); hdr->token = pcb->token; hdr->length = m->m_pkthdr.len - sizeof(*hdr); hdr->lcid = pcb->cid; hdr->idtype = pcb->idtype; NG_BTSOCKET_L2CAP_INFO( "%s: Sending packet: len=%d, length=%d, lcid=%d, token=%d, state=%d\n", __func__, m->m_pkthdr.len, hdr->length, hdr->lcid, hdr->token, pcb->state); /* * If we got here than we have successfully creates new L2CAP * data packet and now we can send it to the L2CAP layer */ NG_SEND_DATA_ONLY(error, pcb->rt->hook, m); return (error); } /* ng_btsocket_l2cap_send2 */ /* * Get socket address */ - int -ng_btsocket_l2cap_sockaddr(struct socket *so, struct sockaddr **nam) +ng_btsocket_l2cap_sockaddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_l2cap_pcb_p pcb = so2l2cap_pcb(so); - struct sockaddr_l2cap sa; + struct sockaddr_l2cap *l2cap = (struct sockaddr_l2cap *)sa; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_node == NULL) return (EINVAL); - bcopy(&pcb->src, 
&sa.l2cap_bdaddr, sizeof(sa.l2cap_bdaddr)); - sa.l2cap_psm = htole16(pcb->psm); - sa.l2cap_len = sizeof(sa); - sa.l2cap_family = AF_BLUETOOTH; - sa.l2cap_cid = 0; - sa.l2cap_bdaddr_type = pcb->srctype; - - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); + *l2cap = (struct sockaddr_l2cap ){ + .l2cap_len = sizeof(struct sockaddr_l2cap), + .l2cap_family = AF_BLUETOOTH, + .l2cap_psm = htole16(pcb->psm), + .l2cap_bdaddr_type = pcb->srctype, + }; + bcopy(&pcb->src, &l2cap->l2cap_bdaddr, sizeof(l2cap->l2cap_bdaddr)); - return ((*nam == NULL)? ENOMEM : 0); -} /* ng_btsocket_l2cap_sockaddr */ + return (0); +} /***************************************************************************** ***************************************************************************** ** Misc. functions ***************************************************************************** *****************************************************************************/ /* * Look for the socket that listens on given PSM and bdaddr. Returns exact or * close match (if any). Caller must hold ng_btsocket_l2cap_sockets_mtx. */ static ng_btsocket_l2cap_pcb_p ng_btsocket_l2cap_pcb_by_addr(bdaddr_p bdaddr, int psm) { ng_btsocket_l2cap_pcb_p p = NULL, p1 = NULL; mtx_assert(&ng_btsocket_l2cap_sockets_mtx, MA_OWNED); LIST_FOREACH(p, &ng_btsocket_l2cap_sockets, next) { if (p->so == NULL || !SOLISTENING(p->so) || p->psm != psm) continue; if (bcmp(&p->src, bdaddr, sizeof(p->src)) == 0) break; if (bcmp(&p->src, NG_HCI_BDADDR_ANY, sizeof(p->src)) == 0) p1 = p; } return ((p != NULL)? p : p1); } /* ng_btsocket_l2cap_pcb_by_addr */ /* * Look for the socket that has given token. * Caller must hold ng_btsocket_l2cap_sockets_mtx. */ static ng_btsocket_l2cap_pcb_p ng_btsocket_l2cap_pcb_by_token(u_int32_t token) { ng_btsocket_l2cap_pcb_p p = NULL; if (token == 0) return (NULL); mtx_assert(&ng_btsocket_l2cap_sockets_mtx, MA_OWNED); LIST_FOREACH(p, &ng_btsocket_l2cap_sockets, next) if (p->token == token) break; return (p); } /* ng_btsocket_l2cap_pcb_by_token */ /* * Look for the socket that assigned to given source address and channel ID. 
* Caller must hold ng_btsocket_l2cap_sockets_mtx */ static ng_btsocket_l2cap_pcb_p ng_btsocket_l2cap_pcb_by_cid(bdaddr_p src, int cid, int idtype) { ng_btsocket_l2cap_pcb_p p = NULL; mtx_assert(&ng_btsocket_l2cap_sockets_mtx, MA_OWNED); LIST_FOREACH(p, &ng_btsocket_l2cap_sockets, next){ if (p->cid == cid && bcmp(src, &p->src, sizeof(p->src)) == 0&& p->idtype == idtype) break; } return (p); } /* ng_btsocket_l2cap_pcb_by_cid */ /* * Set timeout on socket */ static void ng_btsocket_l2cap_timeout(ng_btsocket_l2cap_pcb_p pcb) { mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (!(pcb->flags & NG_BTSOCKET_L2CAP_TIMO)) { pcb->flags |= NG_BTSOCKET_L2CAP_TIMO; callout_reset(&pcb->timo, bluetooth_l2cap_ertx_timeout(), ng_btsocket_l2cap_process_timeout, pcb); } else KASSERT(0, ("%s: Duplicated socket timeout?!\n", __func__)); } /* ng_btsocket_l2cap_timeout */ /* * Unset timeout on socket */ static void ng_btsocket_l2cap_untimeout(ng_btsocket_l2cap_pcb_p pcb) { mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->flags & NG_BTSOCKET_L2CAP_TIMO) { callout_stop(&pcb->timo); pcb->flags &= ~NG_BTSOCKET_L2CAP_TIMO; } else KASSERT(0, ("%s: No socket timeout?!\n", __func__)); } /* ng_btsocket_l2cap_untimeout */ /* * Process timeout on socket */ static void ng_btsocket_l2cap_process_timeout(void *xpcb) { ng_btsocket_l2cap_pcb_p pcb = (ng_btsocket_l2cap_pcb_p) xpcb; mtx_assert(&pcb->pcb_mtx, MA_OWNED); pcb->flags &= ~NG_BTSOCKET_L2CAP_TIMO; pcb->so->so_error = ETIMEDOUT; switch (pcb->state) { case NG_BTSOCKET_L2CAP_CONNECTING: case NG_BTSOCKET_L2CAP_CONFIGURING: case NG_BTSOCKET_L2CAP_W4_ENC_CHANGE: /* Send disconnect request with "zero" token */ if (pcb->cid != 0) ng_btsocket_l2cap_send_l2ca_discon_req(0, pcb); /* ... and close the socket */ pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); break; case NG_BTSOCKET_L2CAP_OPEN: /* Send timeout - drop packet and wakeup sender */ sbdroprecord(&pcb->so->so_snd); sowwakeup(pcb->so); break; case NG_BTSOCKET_L2CAP_DISCONNECTING: /* Disconnect timeout - disconnect the socket anyway */ pcb->state = NG_BTSOCKET_L2CAP_CLOSED; soisdisconnected(pcb->so); break; default: NG_BTSOCKET_L2CAP_ERR( "%s: Invalid socket state=%d\n", __func__, pcb->state); break; } } /* ng_btsocket_l2cap_process_timeout */ /* * Translate HCI/L2CAP error code into "errno" code * XXX Note: Some L2CAP and HCI error codes have the same value, but * different meaning */ static int ng_btsocket_l2cap_result2errno(int result) { switch (result) { case 0x00: /* No error */ return (0); case 0x01: /* Unknown HCI command */ return (ENODEV); case 0x02: /* No connection */ return (ENOTCONN); case 0x03: /* Hardware failure */ return (EIO); case 0x04: /* Page timeout */ return (EHOSTDOWN); case 0x05: /* Authentication failure */ case 0x06: /* Key missing */ case 0x18: /* Pairing not allowed */ case 0x21: /* Role change not allowed */ case 0x24: /* LMP PSU not allowed */ case 0x25: /* Encryption mode not acceptable */ case 0x26: /* Unit key used */ return (EACCES); case 0x07: /* Memory full */ return (ENOMEM); case 0x08: /* Connection timeout */ case 0x10: /* Host timeout */ case 0x22: /* LMP response timeout */ case 0xee: /* HCI timeout */ case 0xeeee: /* L2CAP timeout */ return (ETIMEDOUT); case 0x09: /* Max number of connections */ case 0x0a: /* Max number of SCO connections to a unit */ return (EMLINK); case 0x0b: /* ACL connection already exists */ return (EEXIST); case 0x0c: /* Command disallowed */ return (EBUSY); case 0x0d: /* Host rejected due to limited resources */ case 0x0e: /* Host rejected due to 
securiity reasons */ case 0x0f: /* Host rejected due to remote unit is a personal unit */ case 0x1b: /* SCO offset rejected */ case 0x1c: /* SCO interval rejected */ case 0x1d: /* SCO air mode rejected */ return (ECONNREFUSED); case 0x11: /* Unsupported feature or parameter value */ case 0x19: /* Unknown LMP PDU */ case 0x1a: /* Unsupported remote feature */ case 0x20: /* Unsupported LMP parameter value */ case 0x27: /* QoS is not supported */ case 0x29: /* Paring with unit key not supported */ return (EOPNOTSUPP); case 0x12: /* Invalid HCI command parameter */ case 0x1e: /* Invalid LMP parameters */ return (EINVAL); case 0x13: /* Other end terminated connection: User ended connection */ case 0x14: /* Other end terminated connection: Low resources */ case 0x15: /* Other end terminated connection: About to power off */ return (ECONNRESET); case 0x16: /* Connection terminated by local host */ return (ECONNABORTED); #if 0 /* XXX not yet */ case 0x17: /* Repeated attempts */ case 0x1f: /* Unspecified error */ case 0x23: /* LMP error transaction collision */ case 0x28: /* Instant passed */ #endif } return (ENOSYS); } /* ng_btsocket_l2cap_result2errno */ diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c b/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c index 6ddefe4be156..c95b0c486eb1 100644 --- a/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c +++ b/sys/netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c @@ -1,1357 +1,1354 @@ /* * ng_btsocket_l2cap_raw.c */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: ng_btsocket_l2cap_raw.c,v 1.12 2003/09/14 23:29:06 max Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* MALLOC define */ #ifdef NG_SEPARATE_MALLOC static MALLOC_DEFINE(M_NETGRAPH_BTSOCKET_L2CAP_RAW, "netgraph_btsocks_l2cap_raw", "Netgraph Bluetooth raw L2CAP sockets"); #else #define M_NETGRAPH_BTSOCKET_L2CAP_RAW M_NETGRAPH #endif /* NG_SEPARATE_MALLOC */ /* Netgraph node methods */ static ng_constructor_t ng_btsocket_l2cap_raw_node_constructor; static ng_rcvmsg_t ng_btsocket_l2cap_raw_node_rcvmsg; static ng_shutdown_t ng_btsocket_l2cap_raw_node_shutdown; static ng_newhook_t ng_btsocket_l2cap_raw_node_newhook; static ng_connect_t ng_btsocket_l2cap_raw_node_connect; static ng_rcvdata_t ng_btsocket_l2cap_raw_node_rcvdata; static ng_disconnect_t ng_btsocket_l2cap_raw_node_disconnect; static void ng_btsocket_l2cap_raw_input (void *, int); static void ng_btsocket_l2cap_raw_rtclean (void *, int); static void ng_btsocket_l2cap_raw_get_token (u_int32_t *); static int ng_btsocket_l2cap_raw_send_ngmsg (hook_p, int, void *, int); static int ng_btsocket_l2cap_raw_send_sync_ngmsg (ng_btsocket_l2cap_raw_pcb_p, int, void *, int); #define ng_btsocket_l2cap_raw_wakeup_input_task() \ taskqueue_enqueue(taskqueue_swi, &ng_btsocket_l2cap_raw_queue_task) #define ng_btsocket_l2cap_raw_wakeup_route_task() \ taskqueue_enqueue(taskqueue_swi, &ng_btsocket_l2cap_raw_rt_task) /* Netgraph type descriptor */ static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_BTSOCKET_L2CAP_RAW_NODE_TYPE, .constructor = ng_btsocket_l2cap_raw_node_constructor, .rcvmsg = ng_btsocket_l2cap_raw_node_rcvmsg, .shutdown = ng_btsocket_l2cap_raw_node_shutdown, .newhook = ng_btsocket_l2cap_raw_node_newhook, .connect = ng_btsocket_l2cap_raw_node_connect, .rcvdata = ng_btsocket_l2cap_raw_node_rcvdata, .disconnect = ng_btsocket_l2cap_raw_node_disconnect, }; /* Globals */ extern int ifqmaxlen; static u_int32_t ng_btsocket_l2cap_raw_debug_level; static u_int32_t ng_btsocket_l2cap_raw_ioctl_timeout; static node_p ng_btsocket_l2cap_raw_node; static struct ng_bt_itemq ng_btsocket_l2cap_raw_queue; static struct mtx ng_btsocket_l2cap_raw_queue_mtx; static struct task ng_btsocket_l2cap_raw_queue_task; static LIST_HEAD(, ng_btsocket_l2cap_raw_pcb) ng_btsocket_l2cap_raw_sockets; static struct mtx ng_btsocket_l2cap_raw_sockets_mtx; static u_int32_t ng_btsocket_l2cap_raw_token; static struct mtx ng_btsocket_l2cap_raw_token_mtx; static LIST_HEAD(, ng_btsocket_l2cap_rtentry) ng_btsocket_l2cap_raw_rt; static struct mtx ng_btsocket_l2cap_raw_rt_mtx; static struct task ng_btsocket_l2cap_raw_rt_task; static struct timeval ng_btsocket_l2cap_raw_lasttime; static int ng_btsocket_l2cap_raw_curpps; /* Sysctl tree */ SYSCTL_DECL(_net_bluetooth_l2cap_sockets); static SYSCTL_NODE(_net_bluetooth_l2cap_sockets, OID_AUTO, raw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Bluetooth raw L2CAP sockets family"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_raw, OID_AUTO, debug_level, CTLFLAG_RW, &ng_btsocket_l2cap_raw_debug_level, NG_BTSOCKET_WARN_LEVEL, "Bluetooth raw L2CAP sockets debug level"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_raw, OID_AUTO, ioctl_timeout, CTLFLAG_RW, &ng_btsocket_l2cap_raw_ioctl_timeout, 5, "Bluetooth raw L2CAP sockets ioctl timeout"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_raw, OID_AUTO, queue_len, CTLFLAG_RD, 
&ng_btsocket_l2cap_raw_queue.len, 0, "Bluetooth raw L2CAP sockets input queue length"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_raw, OID_AUTO, queue_maxlen, CTLFLAG_RD, &ng_btsocket_l2cap_raw_queue.maxlen, 0, "Bluetooth raw L2CAP sockets input queue max. length"); SYSCTL_UINT(_net_bluetooth_l2cap_sockets_raw, OID_AUTO, queue_drops, CTLFLAG_RD, &ng_btsocket_l2cap_raw_queue.drops, 0, "Bluetooth raw L2CAP sockets input queue drops"); /* Debug */ #define NG_BTSOCKET_L2CAP_RAW_INFO \ if (ng_btsocket_l2cap_raw_debug_level >= NG_BTSOCKET_INFO_LEVEL && \ ppsratecheck(&ng_btsocket_l2cap_raw_lasttime, &ng_btsocket_l2cap_raw_curpps, 1)) \ printf #define NG_BTSOCKET_L2CAP_RAW_WARN \ if (ng_btsocket_l2cap_raw_debug_level >= NG_BTSOCKET_WARN_LEVEL && \ ppsratecheck(&ng_btsocket_l2cap_raw_lasttime, &ng_btsocket_l2cap_raw_curpps, 1)) \ printf #define NG_BTSOCKET_L2CAP_RAW_ERR \ if (ng_btsocket_l2cap_raw_debug_level >= NG_BTSOCKET_ERR_LEVEL && \ ppsratecheck(&ng_btsocket_l2cap_raw_lasttime, &ng_btsocket_l2cap_raw_curpps, 1)) \ printf #define NG_BTSOCKET_L2CAP_RAW_ALERT \ if (ng_btsocket_l2cap_raw_debug_level >= NG_BTSOCKET_ALERT_LEVEL && \ ppsratecheck(&ng_btsocket_l2cap_raw_lasttime, &ng_btsocket_l2cap_raw_curpps, 1)) \ printf /***************************************************************************** ***************************************************************************** ** Netgraph node interface ***************************************************************************** *****************************************************************************/ /* * Netgraph node constructor. Do not allow to create node of this type. */ static int ng_btsocket_l2cap_raw_node_constructor(node_p node) { return (EINVAL); } /* ng_btsocket_l2cap_raw_node_constructor */ /* * Do local shutdown processing. Let old node go and create new fresh one. */ static int ng_btsocket_l2cap_raw_node_shutdown(node_p node) { int error = 0; NG_NODE_UNREF(node); /* Create new node */ error = ng_make_node_common(&typestruct, &ng_btsocket_l2cap_raw_node); if (error != 0) { NG_BTSOCKET_L2CAP_RAW_ALERT( "%s: Could not create Netgraph node, error=%d\n", __func__, error); ng_btsocket_l2cap_raw_node = NULL; return (error); } error = ng_name_node(ng_btsocket_l2cap_raw_node, NG_BTSOCKET_L2CAP_RAW_NODE_TYPE); if (error != 0) { NG_BTSOCKET_L2CAP_RAW_ALERT( "%s: Could not name Netgraph node, error=%d\n", __func__, error); NG_NODE_UNREF(ng_btsocket_l2cap_raw_node); ng_btsocket_l2cap_raw_node = NULL; return (error); } return (0); } /* ng_btsocket_l2cap_raw_node_shutdown */ /* * We allow any hook to be connected to the node. */ static int ng_btsocket_l2cap_raw_node_newhook(node_p node, hook_p hook, char const *name) { return (0); } /* ng_btsocket_l2cap_raw_node_newhook */ /* * Just say "YEP, that's OK by me!" */ static int ng_btsocket_l2cap_raw_node_connect(hook_p hook) { NG_HOOK_SET_PRIVATE(hook, NULL); NG_HOOK_REF(hook); /* Keep extra reference to the hook */ return (0); } /* ng_btsocket_l2cap_raw_node_connect */ /* * Hook disconnection. Schedule route cleanup task */ static int ng_btsocket_l2cap_raw_node_disconnect(hook_p hook) { /* * If hook has private information than we must have this hook in * the routing table and must schedule cleaning for the routing table. * Otherwise hook was connected but we never got "hook_info" message, * so we have never added this hook to the routing table and it save * to just delete it. 
*/ if (NG_HOOK_PRIVATE(hook) != NULL) return (ng_btsocket_l2cap_raw_wakeup_route_task()); NG_HOOK_UNREF(hook); /* Remove extra reference */ return (0); } /* ng_btsocket_l2cap_raw_node_disconnect */ /* * Process incoming messages */ static int ng_btsocket_l2cap_raw_node_rcvmsg(node_p node, item_p item, hook_p hook) { struct ng_mesg *msg = NGI_MSG(item); /* item still has message */ int error = 0; if (msg != NULL && msg->header.typecookie == NGM_L2CAP_COOKIE) { /* * NGM_L2CAP_NODE_HOOK_INFO is special message initiated by * L2CAP layer. Ignore all other messages if they are not * replies or token is zero */ if (msg->header.cmd != NGM_L2CAP_NODE_HOOK_INFO) { if (msg->header.token == 0 || !(msg->header.flags & NGF_RESP)) { NG_FREE_ITEM(item); return (0); } } mtx_lock(&ng_btsocket_l2cap_raw_queue_mtx); if (NG_BT_ITEMQ_FULL(&ng_btsocket_l2cap_raw_queue)) { NG_BTSOCKET_L2CAP_RAW_ERR( "%s: Input queue is full\n", __func__); NG_BT_ITEMQ_DROP(&ng_btsocket_l2cap_raw_queue); NG_FREE_ITEM(item); error = ENOBUFS; } else { if (hook != NULL) { NG_HOOK_REF(hook); NGI_SET_HOOK(item, hook); } NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_l2cap_raw_queue, item); error = ng_btsocket_l2cap_raw_wakeup_input_task(); } mtx_unlock(&ng_btsocket_l2cap_raw_queue_mtx); } else { NG_FREE_ITEM(item); error = EINVAL; } return (error); } /* ng_btsocket_l2cap_raw_node_rcvmsg */ /* * Receive data on a hook */ static int ng_btsocket_l2cap_raw_node_rcvdata(hook_p hook, item_p item) { NG_FREE_ITEM(item); return (EINVAL); } /* ng_btsocket_l2cap_raw_node_rcvdata */ /***************************************************************************** ***************************************************************************** ** Socket interface ***************************************************************************** *****************************************************************************/ /* * L2CAP sockets input routine */ static void ng_btsocket_l2cap_raw_input(void *context, int pending) { item_p item = NULL; hook_p hook = NULL; struct ng_mesg *msg = NULL; for (;;) { mtx_lock(&ng_btsocket_l2cap_raw_queue_mtx); NG_BT_ITEMQ_DEQUEUE(&ng_btsocket_l2cap_raw_queue, item); mtx_unlock(&ng_btsocket_l2cap_raw_queue_mtx); if (item == NULL) break; KASSERT((item->el_flags & NGQF_TYPE) == NGQF_MESG, ("%s: invalid item type=%ld\n", __func__, (item->el_flags & NGQF_TYPE))); NGI_GET_MSG(item, msg); NGI_GET_HOOK(item, hook); NG_FREE_ITEM(item); switch (msg->header.cmd) { case NGM_L2CAP_NODE_HOOK_INFO: { ng_btsocket_l2cap_rtentry_t *rt = NULL; if (hook == NULL || NG_HOOK_NOT_VALID(hook) || msg->header.arglen != sizeof(bdaddr_t)) break; if (bcmp(msg->data, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) break; rt = (ng_btsocket_l2cap_rtentry_t *) NG_HOOK_PRIVATE(hook); if (rt == NULL) { rt = malloc(sizeof(*rt), M_NETGRAPH_BTSOCKET_L2CAP_RAW, M_NOWAIT|M_ZERO); if (rt == NULL) break; NG_HOOK_SET_PRIVATE(hook, rt); mtx_lock(&ng_btsocket_l2cap_raw_rt_mtx); LIST_INSERT_HEAD(&ng_btsocket_l2cap_raw_rt, rt, next); } else mtx_lock(&ng_btsocket_l2cap_raw_rt_mtx); bcopy(msg->data, &rt->src, sizeof(rt->src)); rt->hook = hook; NG_BTSOCKET_L2CAP_RAW_INFO( "%s: Updating hook \"%s\", src bdaddr=%x:%x:%x:%x:%x:%x\n", __func__, NG_HOOK_NAME(hook), rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0]); mtx_unlock(&ng_btsocket_l2cap_raw_rt_mtx); } break; case NGM_L2CAP_NODE_GET_FLAGS: case NGM_L2CAP_NODE_GET_DEBUG: case NGM_L2CAP_NODE_GET_CON_LIST: case NGM_L2CAP_NODE_GET_CHAN_LIST: case NGM_L2CAP_NODE_GET_AUTO_DISCON_TIMO: case NGM_L2CAP_L2CA_PING: 
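/*
 * The command replies handled below complete synchronous ioctl requests
 * issued from ng_btsocket_l2cap_raw_control(): the requester stored the
 * message token in pcb->token and sleeps on &pcb->msg, so the reply is
 * handed to the pcb with the matching token and the sleeper is woken up.
 */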
case NGM_L2CAP_L2CA_GET_INFO: { ng_btsocket_l2cap_raw_pcb_p pcb = NULL; mtx_lock(&ng_btsocket_l2cap_raw_sockets_mtx); LIST_FOREACH(pcb,&ng_btsocket_l2cap_raw_sockets,next) { mtx_lock(&pcb->pcb_mtx); if (pcb->token == msg->header.token) { pcb->msg = msg; msg = NULL; wakeup(&pcb->msg); mtx_unlock(&pcb->pcb_mtx); break; } mtx_unlock(&pcb->pcb_mtx); } mtx_unlock(&ng_btsocket_l2cap_raw_sockets_mtx); } break; default: NG_BTSOCKET_L2CAP_RAW_WARN( "%s: Unknown message, cmd=%d\n", __func__, msg->header.cmd); break; } if (hook != NULL) NG_HOOK_UNREF(hook); /* remove extra reference */ NG_FREE_MSG(msg); /* Checks for msg != NULL */ } } /* ng_btsocket_l2cap_raw_input */ /* * Route cleanup task. Gets scheduled when hook is disconnected. Here we * will find all sockets that use "invalid" hook and disconnect them. */ static void ng_btsocket_l2cap_raw_rtclean(void *context, int pending) { ng_btsocket_l2cap_raw_pcb_p pcb = NULL; ng_btsocket_l2cap_rtentry_p rt = NULL; /* * First disconnect all sockets that use "invalid" hook */ mtx_lock(&ng_btsocket_l2cap_raw_sockets_mtx); LIST_FOREACH(pcb, &ng_btsocket_l2cap_raw_sockets, next) { mtx_lock(&pcb->pcb_mtx); if (pcb->rt != NULL && pcb->rt->hook != NULL && NG_HOOK_NOT_VALID(pcb->rt->hook)) { if (pcb->so != NULL && pcb->so->so_state & SS_ISCONNECTED) soisdisconnected(pcb->so); pcb->rt = NULL; } mtx_unlock(&pcb->pcb_mtx); } mtx_unlock(&ng_btsocket_l2cap_raw_sockets_mtx); /* * Now cleanup routing table */ mtx_lock(&ng_btsocket_l2cap_raw_rt_mtx); for (rt = LIST_FIRST(&ng_btsocket_l2cap_raw_rt); rt != NULL; ) { ng_btsocket_l2cap_rtentry_p rt_next = LIST_NEXT(rt, next); if (rt->hook != NULL && NG_HOOK_NOT_VALID(rt->hook)) { LIST_REMOVE(rt, next); NG_HOOK_SET_PRIVATE(rt->hook, NULL); NG_HOOK_UNREF(rt->hook); /* Remove extra reference */ bzero(rt, sizeof(*rt)); free(rt, M_NETGRAPH_BTSOCKET_L2CAP_RAW); } rt = rt_next; } mtx_unlock(&ng_btsocket_l2cap_raw_rt_mtx); } /* ng_btsocket_l2cap_raw_rtclean */ /* * Initialize everything */ static void ng_btsocket_l2cap_raw_init(void *arg __unused) { int error = 0; ng_btsocket_l2cap_raw_node = NULL; ng_btsocket_l2cap_raw_debug_level = NG_BTSOCKET_WARN_LEVEL; ng_btsocket_l2cap_raw_ioctl_timeout = 5; /* Register Netgraph node type */ error = ng_newtype(&typestruct); if (error != 0) { NG_BTSOCKET_L2CAP_RAW_ALERT( "%s: Could not register Netgraph node type, error=%d\n", __func__, error); return; } /* Create Netgrapg node */ error = ng_make_node_common(&typestruct, &ng_btsocket_l2cap_raw_node); if (error != 0) { NG_BTSOCKET_L2CAP_RAW_ALERT( "%s: Could not create Netgraph node, error=%d\n", __func__, error); ng_btsocket_l2cap_raw_node = NULL; return; } error = ng_name_node(ng_btsocket_l2cap_raw_node, NG_BTSOCKET_L2CAP_RAW_NODE_TYPE); if (error != 0) { NG_BTSOCKET_L2CAP_RAW_ALERT( "%s: Could not name Netgraph node, error=%d\n", __func__, error); NG_NODE_UNREF(ng_btsocket_l2cap_raw_node); ng_btsocket_l2cap_raw_node = NULL; return; } /* Create input queue */ NG_BT_ITEMQ_INIT(&ng_btsocket_l2cap_raw_queue, ifqmaxlen); mtx_init(&ng_btsocket_l2cap_raw_queue_mtx, "btsocks_l2cap_raw_queue_mtx", NULL, MTX_DEF); TASK_INIT(&ng_btsocket_l2cap_raw_queue_task, 0, ng_btsocket_l2cap_raw_input, NULL); /* Create list of sockets */ LIST_INIT(&ng_btsocket_l2cap_raw_sockets); mtx_init(&ng_btsocket_l2cap_raw_sockets_mtx, "btsocks_l2cap_raw_sockets_mtx", NULL, MTX_DEF); /* Tokens */ ng_btsocket_l2cap_raw_token = 0; mtx_init(&ng_btsocket_l2cap_raw_token_mtx, "btsocks_l2cap_raw_token_mtx", NULL, MTX_DEF); /* Routing table */ LIST_INIT(&ng_btsocket_l2cap_raw_rt); 
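/*
 * The routing table initialized here maps a local bdaddr (rt->src) to the
 * Netgraph hook leading to the corresponding L2CAP node.  Entries are
 * created or updated from NGM_L2CAP_NODE_HOOK_INFO messages and removed
 * by the rtclean task once the hook becomes invalid.
 */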
mtx_init(&ng_btsocket_l2cap_raw_rt_mtx, "btsocks_l2cap_raw_rt_mtx", NULL, MTX_DEF); TASK_INIT(&ng_btsocket_l2cap_raw_rt_task, 0, ng_btsocket_l2cap_raw_rtclean, NULL); } /* ng_btsocket_l2cap_raw_init */ SYSINIT(ng_btsocket_l2cap_raw_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ng_btsocket_l2cap_raw_init, NULL); /* * Abort connection on socket */ void ng_btsocket_l2cap_raw_abort(struct socket *so) { (void)ng_btsocket_l2cap_raw_disconnect(so); } /* ng_btsocket_l2cap_raw_abort */ void ng_btsocket_l2cap_raw_close(struct socket *so) { (void)ng_btsocket_l2cap_raw_disconnect(so); } /* ng_btsocket_l2cap_raw_close */ /* * Create and attach new socket */ int ng_btsocket_l2cap_raw_attach(struct socket *so, int proto, struct thread *td) { ng_btsocket_l2cap_raw_pcb_p pcb = so2l2cap_raw_pcb(so); int error; if (pcb != NULL) return (EISCONN); if (ng_btsocket_l2cap_raw_node == NULL) return (EPROTONOSUPPORT); if (so->so_type != SOCK_RAW) return (ESOCKTNOSUPPORT); /* Reserve send and receive space if it is not reserved yet */ error = soreserve(so, NG_BTSOCKET_L2CAP_RAW_SENDSPACE, NG_BTSOCKET_L2CAP_RAW_RECVSPACE); if (error != 0) return (error); /* Allocate the PCB */ pcb = malloc(sizeof(*pcb), M_NETGRAPH_BTSOCKET_L2CAP_RAW, M_NOWAIT|M_ZERO); if (pcb == NULL) return (ENOMEM); /* Link the PCB and the socket */ so->so_pcb = (caddr_t) pcb; pcb->so = so; if (priv_check(td, PRIV_NETBLUETOOTH_RAW) == 0) pcb->flags |= NG_BTSOCKET_L2CAP_RAW_PRIVILEGED; mtx_init(&pcb->pcb_mtx, "btsocks_l2cap_raw_pcb_mtx", NULL, MTX_DEF); /* Add the PCB to the list */ mtx_lock(&ng_btsocket_l2cap_raw_sockets_mtx); LIST_INSERT_HEAD(&ng_btsocket_l2cap_raw_sockets, pcb, next); mtx_unlock(&ng_btsocket_l2cap_raw_sockets_mtx); return (0); } /* ng_btsocket_l2cap_raw_attach */ /* * Bind socket */ int ng_btsocket_l2cap_raw_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_l2cap_raw_pcb_t *pcb = so2l2cap_raw_pcb(so); struct sockaddr_l2cap *sa = (struct sockaddr_l2cap *) nam; ng_btsocket_l2cap_rtentry_t *rt = NULL; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_raw_node == NULL) return (EINVAL); if (sa == NULL) return (EINVAL); if (sa->l2cap_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if((sa->l2cap_len != sizeof(*sa))&& (sa->l2cap_len != sizeof(struct sockaddr_l2cap_compat))) return (EINVAL); if (bcmp(&sa->l2cap_bdaddr, NG_HCI_BDADDR_ANY, sizeof(sa->l2cap_bdaddr)) != 0) { mtx_lock(&ng_btsocket_l2cap_raw_rt_mtx); LIST_FOREACH(rt, &ng_btsocket_l2cap_raw_rt, next) { if (rt->hook == NULL || NG_HOOK_NOT_VALID(rt->hook)) continue; if (bcmp(&sa->l2cap_bdaddr, &rt->src, sizeof(rt->src)) == 0) break; } mtx_unlock(&ng_btsocket_l2cap_raw_rt_mtx); if (rt == NULL) return (ENETDOWN); } else rt = NULL; mtx_lock(&pcb->pcb_mtx); bcopy(&sa->l2cap_bdaddr, &pcb->src, sizeof(pcb->src)); pcb->rt = rt; mtx_unlock(&pcb->pcb_mtx); return (0); } /* ng_btsocket_l2cap_raw_bind */ /* * Connect socket */ int ng_btsocket_l2cap_raw_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_l2cap_raw_pcb_t *pcb = so2l2cap_raw_pcb(so); struct sockaddr_l2cap *sa = (struct sockaddr_l2cap *) nam; ng_btsocket_l2cap_rtentry_t *rt = NULL; int error; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_raw_node == NULL) return (EINVAL); if (sa == NULL) return (EINVAL); if (sa->l2cap_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if((sa->l2cap_len != sizeof(*sa))&& (sa->l2cap_len != sizeof(struct sockaddr_l2cap_compat))) return (EINVAL); if (bcmp(&sa->l2cap_bdaddr, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) return (EINVAL); 
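/*
 * Note: "connecting" a raw L2CAP socket does not establish an L2CAP
 * channel.  It only records the destination bdaddr and attaches the
 * socket to a route (hook) whose source address differs from that
 * destination, so later ioctls know which L2CAP node to send to.
 */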
mtx_lock(&pcb->pcb_mtx); bcopy(&sa->l2cap_bdaddr, &pcb->dst, sizeof(pcb->dst)); if (bcmp(&pcb->src, &pcb->dst, sizeof(pcb->src)) == 0) { mtx_unlock(&pcb->pcb_mtx); return (EADDRNOTAVAIL); } /* * If there is route already - use it */ if (pcb->rt != NULL) { soisconnected(so); mtx_unlock(&pcb->pcb_mtx); return (0); } /* * Find the first hook that does not match specified destination address */ mtx_lock(&ng_btsocket_l2cap_raw_rt_mtx); LIST_FOREACH(rt, &ng_btsocket_l2cap_raw_rt, next) { if (rt->hook == NULL || NG_HOOK_NOT_VALID(rt->hook)) continue; if (bcmp(&pcb->dst, &rt->src, sizeof(rt->src)) != 0) break; } if (rt != NULL) { soisconnected(so); pcb->rt = rt; bcopy(&rt->src, &pcb->src, sizeof(pcb->src)); error = 0; } else error = ENETDOWN; mtx_unlock(&ng_btsocket_l2cap_raw_rt_mtx); mtx_unlock(&pcb->pcb_mtx); return (error); } /* ng_btsocket_l2cap_raw_connect */ /* * Process ioctl's calls on socket */ int ng_btsocket_l2cap_raw_control(struct socket *so, u_long cmd, void *data, struct ifnet *ifp, struct thread *td) { ng_btsocket_l2cap_raw_pcb_p pcb = so2l2cap_raw_pcb(so); struct ng_mesg *msg = NULL; int error = 0; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_raw_node == NULL) return (EINVAL); mtx_lock(&pcb->pcb_mtx); /* Check if we route info */ if (pcb->rt == NULL) { mtx_unlock(&pcb->pcb_mtx); return (EHOSTUNREACH); } /* Check if we have pending ioctl() */ if (pcb->token != 0) { mtx_unlock(&pcb->pcb_mtx); return (EBUSY); } switch (cmd) { case SIOC_L2CAP_NODE_GET_FLAGS: { struct ng_btsocket_l2cap_raw_node_flags *p = (struct ng_btsocket_l2cap_raw_node_flags *) data; error = ng_btsocket_l2cap_raw_send_sync_ngmsg(pcb, NGM_L2CAP_NODE_GET_FLAGS, &p->flags, sizeof(p->flags)); } break; case SIOC_L2CAP_NODE_GET_DEBUG: { struct ng_btsocket_l2cap_raw_node_debug *p = (struct ng_btsocket_l2cap_raw_node_debug *) data; error = ng_btsocket_l2cap_raw_send_sync_ngmsg(pcb, NGM_L2CAP_NODE_GET_DEBUG, &p->debug, sizeof(p->debug)); } break; case SIOC_L2CAP_NODE_SET_DEBUG: { struct ng_btsocket_l2cap_raw_node_debug *p = (struct ng_btsocket_l2cap_raw_node_debug *) data; if (pcb->flags & NG_BTSOCKET_L2CAP_RAW_PRIVILEGED) error = ng_btsocket_l2cap_raw_send_ngmsg(pcb->rt->hook, NGM_L2CAP_NODE_SET_DEBUG, &p->debug, sizeof(p->debug)); else error = EPERM; } break; case SIOC_L2CAP_NODE_GET_CON_LIST: { struct ng_btsocket_l2cap_raw_con_list *p = (struct ng_btsocket_l2cap_raw_con_list *) data; ng_l2cap_node_con_list_ep *p1 = NULL; ng_l2cap_node_con_ep *p2 = NULL; if (p->num_connections == 0 || p->num_connections > NG_L2CAP_MAX_CON_NUM || p->connections == NULL) { mtx_unlock(&pcb->pcb_mtx); return (EINVAL); } NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_NODE_GET_CON_LIST, 0, M_NOWAIT); if (msg == NULL) { mtx_unlock(&pcb->pcb_mtx); return (ENOMEM); } ng_btsocket_l2cap_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_raw_node, msg, pcb->rt->hook, 0); if (error != 0) { pcb->token = 0; mtx_unlock(&pcb->pcb_mtx); return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "l2ctl", ng_btsocket_l2cap_raw_ioctl_timeout * hz); pcb->token = 0; if (error != 0) { mtx_unlock(&pcb->pcb_mtx); return (error); } msg = pcb->msg; pcb->msg = NULL; mtx_unlock(&pcb->pcb_mtx); if (msg != NULL && msg->header.cmd == NGM_L2CAP_NODE_GET_CON_LIST) { /* Return data back to user space */ p1 = (ng_l2cap_node_con_list_ep *)(msg->data); p2 = (ng_l2cap_node_con_ep *)(p1 + 1); p->num_connections = min(p->num_connections, p1->num_connections); if (p->num_connections 
> 0) error = copyout((caddr_t) p2, (caddr_t) p->connections, p->num_connections * sizeof(*p2)); } else error = EINVAL; NG_FREE_MSG(msg); /* checks for != NULL */ return (error); } /* NOTREACHED */ case SIOC_L2CAP_NODE_GET_CHAN_LIST: { struct ng_btsocket_l2cap_raw_chan_list *p = (struct ng_btsocket_l2cap_raw_chan_list *) data; ng_l2cap_node_chan_list_ep *p1 = NULL; ng_l2cap_node_chan_ep *p2 = NULL; if (p->num_channels == 0 || p->num_channels > NG_L2CAP_MAX_CHAN_NUM || p->channels == NULL) { mtx_unlock(&pcb->pcb_mtx); return (EINVAL); } NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_NODE_GET_CHAN_LIST, 0, M_NOWAIT); if (msg == NULL) { mtx_unlock(&pcb->pcb_mtx); return (ENOMEM); } ng_btsocket_l2cap_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_raw_node, msg, pcb->rt->hook, 0); if (error != 0) { pcb->token = 0; mtx_unlock(&pcb->pcb_mtx); return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "l2ctl", ng_btsocket_l2cap_raw_ioctl_timeout * hz); pcb->token = 0; if (error != 0) { mtx_unlock(&pcb->pcb_mtx); return (error); } msg = pcb->msg; pcb->msg = NULL; mtx_unlock(&pcb->pcb_mtx); if (msg != NULL && msg->header.cmd == NGM_L2CAP_NODE_GET_CHAN_LIST) { /* Return data back to user space */ p1 = (ng_l2cap_node_chan_list_ep *)(msg->data); p2 = (ng_l2cap_node_chan_ep *)(p1 + 1); p->num_channels = min(p->num_channels, p1->num_channels); if (p->num_channels > 0) error = copyout((caddr_t) p2, (caddr_t) p->channels, p->num_channels * sizeof(*p2)); } else error = EINVAL; NG_FREE_MSG(msg); /* checks for != NULL */ return (error); } /* NOTREACHED */ case SIOC_L2CAP_L2CA_PING: { struct ng_btsocket_l2cap_raw_ping *p = (struct ng_btsocket_l2cap_raw_ping *) data; ng_l2cap_l2ca_ping_ip *ip = NULL; ng_l2cap_l2ca_ping_op *op = NULL; if ((p->echo_size != 0 && p->echo_data == NULL) || p->echo_size > NG_L2CAP_MAX_ECHO_SIZE) { mtx_unlock(&pcb->pcb_mtx); return (EINVAL); } NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_L2CA_PING, sizeof(*ip) + p->echo_size, M_NOWAIT); if (msg == NULL) { mtx_unlock(&pcb->pcb_mtx); return (ENOMEM); } ng_btsocket_l2cap_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; ip = (ng_l2cap_l2ca_ping_ip *)(msg->data); bcopy(&pcb->dst, &ip->bdaddr, sizeof(ip->bdaddr)); ip->echo_size = p->echo_size; if (ip->echo_size > 0) { mtx_unlock(&pcb->pcb_mtx); error = copyin(p->echo_data, ip + 1, p->echo_size); mtx_lock(&pcb->pcb_mtx); if (error != 0) { NG_FREE_MSG(msg); pcb->token = 0; mtx_unlock(&pcb->pcb_mtx); return (error); } } NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_raw_node, msg, pcb->rt->hook, 0); if (error != 0) { pcb->token = 0; mtx_unlock(&pcb->pcb_mtx); return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "l2ctl", bluetooth_l2cap_rtx_timeout()); pcb->token = 0; if (error != 0) { mtx_unlock(&pcb->pcb_mtx); return (error); } msg = pcb->msg; pcb->msg = NULL; mtx_unlock(&pcb->pcb_mtx); if (msg != NULL && msg->header.cmd == NGM_L2CAP_L2CA_PING) { /* Return data back to the user space */ op = (ng_l2cap_l2ca_ping_op *)(msg->data); p->result = op->result; p->echo_size = min(p->echo_size, op->echo_size); if (p->echo_size > 0) error = copyout(op + 1, p->echo_data, p->echo_size); } else error = EINVAL; NG_FREE_MSG(msg); /* checks for != NULL */ return (error); } /* NOTREACHED */ case SIOC_L2CAP_L2CA_GET_INFO: { struct ng_btsocket_l2cap_raw_get_info *p = (struct ng_btsocket_l2cap_raw_get_info *) data; ng_l2cap_l2ca_get_info_ip *ip = NULL; 
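/*
 * For illustration only (not part of this change): a minimal userland
 * sketch of driving these raw-socket ioctls.  PF_BLUETOOTH and bt_aton()
 * are assumed from the usual FreeBSD Bluetooth userland headers and
 * libbluetooth; the socket address and ioctl structures are the ones
 * handled in this file.  Error checking is omitted.
 *
 *	struct sockaddr_l2cap rem = {
 *		.l2cap_len = sizeof(rem),
 *		.l2cap_family = AF_BLUETOOTH,
 *	};
 *	struct ng_btsocket_l2cap_raw_ping pr;
 *	uint8_t echo[32];
 *	int s;
 *
 *	s = socket(PF_BLUETOOTH, SOCK_RAW, BLUETOOTH_PROTO_L2CAP);
 *	bt_aton("00:11:22:33:44:55", &rem.l2cap_bdaddr);
 *	connect(s, (struct sockaddr *)&rem, sizeof(rem));
 *	pr.echo_size = sizeof(echo);
 *	pr.echo_data = echo;
 *	ioctl(s, SIOC_L2CAP_L2CA_PING, &pr);
 *	(on success pr.result carries the L2CAP status and echo[] the reply)
 */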
ng_l2cap_l2ca_get_info_op *op = NULL; if (!(pcb->flags & NG_BTSOCKET_L2CAP_RAW_PRIVILEGED)) { mtx_unlock(&pcb->pcb_mtx); return (EPERM); } if (p->info_size != 0 && p->info_data == NULL) { mtx_unlock(&pcb->pcb_mtx); return (EINVAL); } NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, NGM_L2CAP_L2CA_GET_INFO, sizeof(*ip) + p->info_size, M_NOWAIT); if (msg == NULL) { mtx_unlock(&pcb->pcb_mtx); return (ENOMEM); } ng_btsocket_l2cap_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; ip = (ng_l2cap_l2ca_get_info_ip *)(msg->data); bcopy(&pcb->dst, &ip->bdaddr, sizeof(ip->bdaddr)); ip->info_type = p->info_type; NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_raw_node, msg, pcb->rt->hook, 0); if (error != 0) { pcb->token = 0; mtx_unlock(&pcb->pcb_mtx); return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "l2ctl", bluetooth_l2cap_rtx_timeout()); pcb->token = 0; if (error != 0) { mtx_unlock(&pcb->pcb_mtx); return (error); } msg = pcb->msg; pcb->msg = NULL; mtx_unlock(&pcb->pcb_mtx); if (msg != NULL && msg->header.cmd == NGM_L2CAP_L2CA_GET_INFO) { /* Return data back to the user space */ op = (ng_l2cap_l2ca_get_info_op *)(msg->data); p->result = op->result; p->info_size = min(p->info_size, op->info_size); if (p->info_size > 0) error = copyout(op + 1, p->info_data, p->info_size); } else error = EINVAL; NG_FREE_MSG(msg); /* checks for != NULL */ return (error); } /* NOTREACHED */ case SIOC_L2CAP_NODE_GET_AUTO_DISCON_TIMO: { struct ng_btsocket_l2cap_raw_auto_discon_timo *p = (struct ng_btsocket_l2cap_raw_auto_discon_timo *) data; error = ng_btsocket_l2cap_raw_send_sync_ngmsg(pcb, NGM_L2CAP_NODE_GET_AUTO_DISCON_TIMO, &p->timeout, sizeof(p->timeout)); } break; case SIOC_L2CAP_NODE_SET_AUTO_DISCON_TIMO: { struct ng_btsocket_l2cap_raw_auto_discon_timo *p = (struct ng_btsocket_l2cap_raw_auto_discon_timo *) data; if (pcb->flags & NG_BTSOCKET_L2CAP_RAW_PRIVILEGED) error = ng_btsocket_l2cap_raw_send_ngmsg(pcb->rt->hook, NGM_L2CAP_NODE_SET_AUTO_DISCON_TIMO, &p->timeout, sizeof(p->timeout)); else error = EPERM; } break; default: error = EINVAL; break; } mtx_unlock(&pcb->pcb_mtx); return (error); } /* ng_btsocket_l2cap_raw_control */ /* * Detach and destroy socket */ void ng_btsocket_l2cap_raw_detach(struct socket *so) { ng_btsocket_l2cap_raw_pcb_p pcb = so2l2cap_raw_pcb(so); KASSERT(pcb != NULL, ("nt_btsocket_l2cap_raw_detach: pcb == NULL")); if (ng_btsocket_l2cap_raw_node == NULL) return; mtx_lock(&ng_btsocket_l2cap_raw_sockets_mtx); mtx_lock(&pcb->pcb_mtx); LIST_REMOVE(pcb, next); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_l2cap_raw_sockets_mtx); mtx_destroy(&pcb->pcb_mtx); bzero(pcb, sizeof(*pcb)); free(pcb, M_NETGRAPH_BTSOCKET_L2CAP_RAW); so->so_pcb = NULL; } /* ng_btsocket_l2cap_raw_detach */ /* * Disconnect socket */ int ng_btsocket_l2cap_raw_disconnect(struct socket *so) { ng_btsocket_l2cap_raw_pcb_p pcb = so2l2cap_raw_pcb(so); if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_raw_node == NULL) return (EINVAL); mtx_lock(&pcb->pcb_mtx); pcb->rt = NULL; soisdisconnected(so); mtx_unlock(&pcb->pcb_mtx); return (0); } /* ng_btsocket_l2cap_raw_disconnect */ /* * Get peer address */ int -ng_btsocket_l2cap_raw_peeraddr(struct socket *so, struct sockaddr **nam) +ng_btsocket_l2cap_raw_peeraddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_l2cap_raw_pcb_p pcb = so2l2cap_raw_pcb(so); - struct sockaddr_l2cap sa; + struct sockaddr_l2cap *l2cap = (struct sockaddr_l2cap *)sa; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_raw_node == NULL) return (EINVAL); + 
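/*
 * As with the SEQPACKET sockets earlier in this change,
 * ng_btsocket_l2cap_raw_peeraddr() and ng_btsocket_l2cap_raw_sockaddr()
 * now fill in caller-provided sockaddr storage instead of allocating a
 * copy with sodupsockaddr(), which is why the M_NOWAIT allocation and the
 * ENOMEM path drop out of these functions.
 */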
*l2cap = (struct sockaddr_l2cap ){ + .l2cap_len = sizeof(struct sockaddr_l2cap), + .l2cap_family = AF_BLUETOOTH, + .l2cap_bdaddr_type = BDADDR_BREDR, + }; + mtx_lock(&pcb->pcb_mtx); - bcopy(&pcb->dst, &sa.l2cap_bdaddr, sizeof(sa.l2cap_bdaddr)); + bcopy(&pcb->dst, &l2cap->l2cap_bdaddr, sizeof(l2cap->l2cap_bdaddr)); mtx_unlock(&pcb->pcb_mtx); - sa.l2cap_psm = 0; - sa.l2cap_len = sizeof(sa); - sa.l2cap_family = AF_BLUETOOTH; - sa.l2cap_cid = 0; - sa.l2cap_bdaddr_type = BDADDR_BREDR; - - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); - - return ((*nam == NULL)? ENOMEM : 0); -} /* ng_btsocket_l2cap_raw_peeraddr */ + return (0); +} /* * Send data to socket */ int ng_btsocket_l2cap_raw_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { NG_FREE_M(m); /* Checks for m != NULL */ NG_FREE_M(control); return (EOPNOTSUPP); } /* ng_btsocket_l2cap_raw_send */ /* * Get socket address */ int -ng_btsocket_l2cap_raw_sockaddr(struct socket *so, struct sockaddr **nam) +ng_btsocket_l2cap_raw_sockaddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_l2cap_raw_pcb_p pcb = so2l2cap_raw_pcb(so); - struct sockaddr_l2cap sa; + struct sockaddr_l2cap *l2cap = (struct sockaddr_l2cap *)sa; if (pcb == NULL) return (EINVAL); if (ng_btsocket_l2cap_raw_node == NULL) return (EINVAL); + *l2cap = (struct sockaddr_l2cap ){ + .l2cap_len = sizeof(struct sockaddr_l2cap), + .l2cap_family = AF_BLUETOOTH, + .l2cap_bdaddr_type = BDADDR_BREDR, + }; + mtx_lock(&pcb->pcb_mtx); - bcopy(&pcb->src, &sa.l2cap_bdaddr, sizeof(sa.l2cap_bdaddr)); + bcopy(&pcb->src, &l2cap->l2cap_bdaddr, sizeof(l2cap->l2cap_bdaddr)); mtx_unlock(&pcb->pcb_mtx); - sa.l2cap_psm = 0; - sa.l2cap_len = sizeof(sa); - sa.l2cap_family = AF_BLUETOOTH; - sa.l2cap_cid = 0; - sa.l2cap_bdaddr_type = BDADDR_BREDR; - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); - - return ((*nam == NULL)? 
ENOMEM : 0); -} /* ng_btsocket_l2cap_raw_sockaddr */ + return (0); +} /* * Get next token */ static void ng_btsocket_l2cap_raw_get_token(u_int32_t *token) { mtx_lock(&ng_btsocket_l2cap_raw_token_mtx); if (++ ng_btsocket_l2cap_raw_token == 0) ng_btsocket_l2cap_raw_token = 1; *token = ng_btsocket_l2cap_raw_token; mtx_unlock(&ng_btsocket_l2cap_raw_token_mtx); } /* ng_btsocket_l2cap_raw_get_token */ /* * Send Netgraph message to the node - do not expect reply */ static int ng_btsocket_l2cap_raw_send_ngmsg(hook_p hook, int cmd, void *arg, int arglen) { struct ng_mesg *msg = NULL; int error = 0; NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, cmd, arglen, M_NOWAIT); if (msg == NULL) return (ENOMEM); if (arg != NULL && arglen > 0) bcopy(arg, msg->data, arglen); NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_raw_node, msg, hook, 0); return (error); } /* ng_btsocket_l2cap_raw_send_ngmsg */ /* * Send Netgraph message to the node (no data) and wait for reply */ static int ng_btsocket_l2cap_raw_send_sync_ngmsg(ng_btsocket_l2cap_raw_pcb_p pcb, int cmd, void *rsp, int rsplen) { struct ng_mesg *msg = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); NG_MKMESSAGE(msg, NGM_L2CAP_COOKIE, cmd, 0, M_NOWAIT); if (msg == NULL) return (ENOMEM); ng_btsocket_l2cap_raw_get_token(&msg->header.token); pcb->token = msg->header.token; pcb->msg = NULL; NG_SEND_MSG_HOOK(error, ng_btsocket_l2cap_raw_node, msg, pcb->rt->hook, 0); if (error != 0) { pcb->token = 0; return (error); } error = msleep(&pcb->msg, &pcb->pcb_mtx, PZERO|PCATCH, "l2ctl", ng_btsocket_l2cap_raw_ioctl_timeout * hz); pcb->token = 0; if (error != 0) return (error); if (pcb->msg != NULL && pcb->msg->header.cmd == cmd) bcopy(pcb->msg->data, rsp, rsplen); else error = EINVAL; NG_FREE_MSG(pcb->msg); /* checks for != NULL */ return (0); } /* ng_btsocket_l2cap_raw_send_sync_ngmsg */ diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c b/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c index af542f3c258b..4780227a66f1 100644 --- a/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c +++ b/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c @@ -1,3569 +1,3546 @@ /* * ng_btsocket_rfcomm.c */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2003 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: ng_btsocket_rfcomm.c,v 1.28 2003/09/14 23:29:06 max Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* MALLOC define */ #ifdef NG_SEPARATE_MALLOC static MALLOC_DEFINE(M_NETGRAPH_BTSOCKET_RFCOMM, "netgraph_btsocks_rfcomm", "Netgraph Bluetooth RFCOMM sockets"); #else #define M_NETGRAPH_BTSOCKET_RFCOMM M_NETGRAPH #endif /* NG_SEPARATE_MALLOC */ /* Debug */ #define NG_BTSOCKET_RFCOMM_INFO \ if (ng_btsocket_rfcomm_debug_level >= NG_BTSOCKET_INFO_LEVEL && \ ppsratecheck(&ng_btsocket_rfcomm_lasttime, &ng_btsocket_rfcomm_curpps, 1)) \ printf #define NG_BTSOCKET_RFCOMM_WARN \ if (ng_btsocket_rfcomm_debug_level >= NG_BTSOCKET_WARN_LEVEL && \ ppsratecheck(&ng_btsocket_rfcomm_lasttime, &ng_btsocket_rfcomm_curpps, 1)) \ printf #define NG_BTSOCKET_RFCOMM_ERR \ if (ng_btsocket_rfcomm_debug_level >= NG_BTSOCKET_ERR_LEVEL && \ ppsratecheck(&ng_btsocket_rfcomm_lasttime, &ng_btsocket_rfcomm_curpps, 1)) \ printf #define NG_BTSOCKET_RFCOMM_ALERT \ if (ng_btsocket_rfcomm_debug_level >= NG_BTSOCKET_ALERT_LEVEL && \ ppsratecheck(&ng_btsocket_rfcomm_lasttime, &ng_btsocket_rfcomm_curpps, 1)) \ printf #define ALOT 0x7fff /* Local prototypes */ static int ng_btsocket_rfcomm_upcall (struct socket *so, void *arg, int waitflag); static void ng_btsocket_rfcomm_sessions_task (void *ctx, int pending); static void ng_btsocket_rfcomm_session_task (ng_btsocket_rfcomm_session_p s); #define ng_btsocket_rfcomm_task_wakeup() \ taskqueue_enqueue(taskqueue_swi_giant, &ng_btsocket_rfcomm_task) static ng_btsocket_rfcomm_pcb_p ng_btsocket_rfcomm_connect_ind (ng_btsocket_rfcomm_session_p s, int channel); static void ng_btsocket_rfcomm_connect_cfm (ng_btsocket_rfcomm_session_p s); static int ng_btsocket_rfcomm_session_create (ng_btsocket_rfcomm_session_p *sp, struct socket *l2so, bdaddr_p src, bdaddr_p dst, struct thread *td); static int ng_btsocket_rfcomm_session_accept (ng_btsocket_rfcomm_session_p s0); static int ng_btsocket_rfcomm_session_connect (ng_btsocket_rfcomm_session_p s); static int ng_btsocket_rfcomm_session_receive (ng_btsocket_rfcomm_session_p s); static int ng_btsocket_rfcomm_session_send (ng_btsocket_rfcomm_session_p s); static void ng_btsocket_rfcomm_session_clean (ng_btsocket_rfcomm_session_p s); static void ng_btsocket_rfcomm_session_process_pcb (ng_btsocket_rfcomm_session_p s); static ng_btsocket_rfcomm_session_p ng_btsocket_rfcomm_session_by_addr (bdaddr_p src, bdaddr_p dst); static int ng_btsocket_rfcomm_receive_frame (ng_btsocket_rfcomm_session_p s, struct mbuf *m0); static int ng_btsocket_rfcomm_receive_sabm (ng_btsocket_rfcomm_session_p s, int dlci); static int ng_btsocket_rfcomm_receive_disc (ng_btsocket_rfcomm_session_p s, int dlci); static int ng_btsocket_rfcomm_receive_ua (ng_btsocket_rfcomm_session_p s, int dlci); static int ng_btsocket_rfcomm_receive_dm (ng_btsocket_rfcomm_session_p s, int dlci); static int ng_btsocket_rfcomm_receive_uih (ng_btsocket_rfcomm_session_p s, int dlci, int pf, struct mbuf *m0); static int ng_btsocket_rfcomm_receive_mcc (ng_btsocket_rfcomm_session_p s, struct mbuf *m0); static int ng_btsocket_rfcomm_receive_test (ng_btsocket_rfcomm_session_p s, struct mbuf *m0); static int ng_btsocket_rfcomm_receive_fc (ng_btsocket_rfcomm_session_p s, struct mbuf *m0); static int ng_btsocket_rfcomm_receive_msc (ng_btsocket_rfcomm_session_p s, struct mbuf *m0); 
static int ng_btsocket_rfcomm_receive_rpn (ng_btsocket_rfcomm_session_p s, struct mbuf *m0); static int ng_btsocket_rfcomm_receive_rls (ng_btsocket_rfcomm_session_p s, struct mbuf *m0); static int ng_btsocket_rfcomm_receive_pn (ng_btsocket_rfcomm_session_p s, struct mbuf *m0); static void ng_btsocket_rfcomm_set_pn (ng_btsocket_rfcomm_pcb_p pcb, u_int8_t cr, u_int8_t flow_control, u_int8_t credits, u_int16_t mtu); static int ng_btsocket_rfcomm_send_command (ng_btsocket_rfcomm_session_p s, u_int8_t type, u_int8_t dlci); static int ng_btsocket_rfcomm_send_uih (ng_btsocket_rfcomm_session_p s, u_int8_t address, u_int8_t pf, u_int8_t credits, struct mbuf *data); static int ng_btsocket_rfcomm_send_msc (ng_btsocket_rfcomm_pcb_p pcb); static int ng_btsocket_rfcomm_send_pn (ng_btsocket_rfcomm_pcb_p pcb); static int ng_btsocket_rfcomm_send_credits (ng_btsocket_rfcomm_pcb_p pcb); static int ng_btsocket_rfcomm_pcb_send (ng_btsocket_rfcomm_pcb_p pcb, int limit); static void ng_btsocket_rfcomm_pcb_kill (ng_btsocket_rfcomm_pcb_p pcb, int error); static ng_btsocket_rfcomm_pcb_p ng_btsocket_rfcomm_pcb_by_dlci (ng_btsocket_rfcomm_session_p s, int dlci); static ng_btsocket_rfcomm_pcb_p ng_btsocket_rfcomm_pcb_listener (bdaddr_p src, int channel); static void ng_btsocket_rfcomm_timeout (ng_btsocket_rfcomm_pcb_p pcb); static void ng_btsocket_rfcomm_untimeout (ng_btsocket_rfcomm_pcb_p pcb); static void ng_btsocket_rfcomm_process_timeout (void *xpcb); static struct mbuf * ng_btsocket_rfcomm_prepare_packet (struct sockbuf *sb, int length); /* Globals */ extern int ifqmaxlen; static u_int32_t ng_btsocket_rfcomm_debug_level; static u_int32_t ng_btsocket_rfcomm_timo; struct task ng_btsocket_rfcomm_task; static LIST_HEAD(, ng_btsocket_rfcomm_session) ng_btsocket_rfcomm_sessions; static struct mtx ng_btsocket_rfcomm_sessions_mtx; static LIST_HEAD(, ng_btsocket_rfcomm_pcb) ng_btsocket_rfcomm_sockets; static struct mtx ng_btsocket_rfcomm_sockets_mtx; static struct timeval ng_btsocket_rfcomm_lasttime; static int ng_btsocket_rfcomm_curpps; /* Sysctl tree */ SYSCTL_DECL(_net_bluetooth_rfcomm_sockets); static SYSCTL_NODE(_net_bluetooth_rfcomm_sockets, OID_AUTO, stream, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Bluetooth STREAM RFCOMM sockets family"); SYSCTL_UINT(_net_bluetooth_rfcomm_sockets_stream, OID_AUTO, debug_level, CTLFLAG_RW, &ng_btsocket_rfcomm_debug_level, NG_BTSOCKET_INFO_LEVEL, "Bluetooth STREAM RFCOMM sockets debug level"); SYSCTL_UINT(_net_bluetooth_rfcomm_sockets_stream, OID_AUTO, timeout, CTLFLAG_RW, &ng_btsocket_rfcomm_timo, 60, "Bluetooth STREAM RFCOMM sockets timeout"); /***************************************************************************** ***************************************************************************** ** RFCOMM CRC ***************************************************************************** *****************************************************************************/ static u_int8_t ng_btsocket_rfcomm_crc_table[256] = { 0x00, 0x91, 0xe3, 0x72, 0x07, 0x96, 0xe4, 0x75, 0x0e, 0x9f, 0xed, 0x7c, 0x09, 0x98, 0xea, 0x7b, 0x1c, 0x8d, 0xff, 0x6e, 0x1b, 0x8a, 0xf8, 0x69, 0x12, 0x83, 0xf1, 0x60, 0x15, 0x84, 0xf6, 0x67, 0x38, 0xa9, 0xdb, 0x4a, 0x3f, 0xae, 0xdc, 0x4d, 0x36, 0xa7, 0xd5, 0x44, 0x31, 0xa0, 0xd2, 0x43, 0x24, 0xb5, 0xc7, 0x56, 0x23, 0xb2, 0xc0, 0x51, 0x2a, 0xbb, 0xc9, 0x58, 0x2d, 0xbc, 0xce, 0x5f, 0x70, 0xe1, 0x93, 0x02, 0x77, 0xe6, 0x94, 0x05, 0x7e, 0xef, 0x9d, 0x0c, 0x79, 0xe8, 0x9a, 0x0b, 0x6c, 0xfd, 0x8f, 0x1e, 0x6b, 0xfa, 0x88, 0x19, 0x62, 0xf3, 0x81, 0x10, 0x65, 0xf4, 0x86, 0x17, 0x48, 
0xd9, 0xab, 0x3a, 0x4f, 0xde, 0xac, 0x3d, 0x46, 0xd7, 0xa5, 0x34, 0x41, 0xd0, 0xa2, 0x33, 0x54, 0xc5, 0xb7, 0x26, 0x53, 0xc2, 0xb0, 0x21, 0x5a, 0xcb, 0xb9, 0x28, 0x5d, 0xcc, 0xbe, 0x2f, 0xe0, 0x71, 0x03, 0x92, 0xe7, 0x76, 0x04, 0x95, 0xee, 0x7f, 0x0d, 0x9c, 0xe9, 0x78, 0x0a, 0x9b, 0xfc, 0x6d, 0x1f, 0x8e, 0xfb, 0x6a, 0x18, 0x89, 0xf2, 0x63, 0x11, 0x80, 0xf5, 0x64, 0x16, 0x87, 0xd8, 0x49, 0x3b, 0xaa, 0xdf, 0x4e, 0x3c, 0xad, 0xd6, 0x47, 0x35, 0xa4, 0xd1, 0x40, 0x32, 0xa3, 0xc4, 0x55, 0x27, 0xb6, 0xc3, 0x52, 0x20, 0xb1, 0xca, 0x5b, 0x29, 0xb8, 0xcd, 0x5c, 0x2e, 0xbf, 0x90, 0x01, 0x73, 0xe2, 0x97, 0x06, 0x74, 0xe5, 0x9e, 0x0f, 0x7d, 0xec, 0x99, 0x08, 0x7a, 0xeb, 0x8c, 0x1d, 0x6f, 0xfe, 0x8b, 0x1a, 0x68, 0xf9, 0x82, 0x13, 0x61, 0xf0, 0x85, 0x14, 0x66, 0xf7, 0xa8, 0x39, 0x4b, 0xda, 0xaf, 0x3e, 0x4c, 0xdd, 0xa6, 0x37, 0x45, 0xd4, 0xa1, 0x30, 0x42, 0xd3, 0xb4, 0x25, 0x57, 0xc6, 0xb3, 0x22, 0x50, 0xc1, 0xba, 0x2b, 0x59, 0xc8, 0xbd, 0x2c, 0x5e, 0xcf }; /* CRC */ static u_int8_t ng_btsocket_rfcomm_crc(u_int8_t *data, int length) { u_int8_t crc = 0xff; while (length --) crc = ng_btsocket_rfcomm_crc_table[crc ^ *data++]; return (crc); } /* ng_btsocket_rfcomm_crc */ /* FCS on 2 bytes */ static u_int8_t ng_btsocket_rfcomm_fcs2(u_int8_t *data) { return (0xff - ng_btsocket_rfcomm_crc(data, 2)); } /* ng_btsocket_rfcomm_fcs2 */ /* FCS on 3 bytes */ static u_int8_t ng_btsocket_rfcomm_fcs3(u_int8_t *data) { return (0xff - ng_btsocket_rfcomm_crc(data, 3)); } /* ng_btsocket_rfcomm_fcs3 */ /* * Check FCS * * From Bluetooth spec * * "... In 07.10, the frame check sequence (FCS) is calculated on different * sets of fields for different frame types. These are the fields that the * FCS are calculated on: * * For SABM, DISC, UA, DM frames: on Address, Control and length field. * For UIH frames: on Address and Control field. * * (This is stated here for clarification, and to set the standard for RFCOMM; * the fields included in FCS calculation have actually changed in version * 7.0.0 of TS 07.10, but RFCOMM will not change the FCS calculation scheme * from the one above.) ..." 
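The table above is the usual reflected TS 07.10 CRC-8 (generator x^8 + x^2 + x + 1, initial value 0xff, FCS = 0xff - crc). As a cross-check, here is a standalone bitwise sketch that should reproduce the table-driven routines below; the 0xe0 reflected-polynomial form and the sample octets in main() are illustrative assumptions, not taken from a real trace.

#include <stdint.h>
#include <stdio.h>

/* Bitwise equivalent of the table lookup: one byte at a time, LSB first. */
static uint8_t
rfcomm_crc(const uint8_t *data, int len)
{
	uint8_t crc = 0xff;

	while (len--) {
		crc ^= *data++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xe0 : crc >> 1;
	}
	return (crc);
}

int
main(void)
{
	/* Address and control octets of a hypothetical UIH frame on DLCI 0. */
	uint8_t hdr[] = { 0x03, 0xef };
	uint8_t fcs = 0xff - rfcomm_crc(hdr, 2);	/* what the sender appends */

	/* Receiver side: recompute over the same octets and compare. */
	printf("fcs=%#x ok=%d\n", fcs, (0xff - rfcomm_crc(hdr, 2)) == fcs);
	return (0);
}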
*/ static int ng_btsocket_rfcomm_check_fcs(u_int8_t *data, int type, u_int8_t fcs) { if (type != RFCOMM_FRAME_UIH) return (ng_btsocket_rfcomm_fcs3(data) != fcs); return (ng_btsocket_rfcomm_fcs2(data) != fcs); } /* ng_btsocket_rfcomm_check_fcs */ /***************************************************************************** ***************************************************************************** ** Socket interface ***************************************************************************** *****************************************************************************/ /* * Initialize everything */ static void ng_btsocket_rfcomm_init(void *arg __unused) { ng_btsocket_rfcomm_debug_level = NG_BTSOCKET_WARN_LEVEL; ng_btsocket_rfcomm_timo = 60; /* RFCOMM task */ TASK_INIT(&ng_btsocket_rfcomm_task, 0, ng_btsocket_rfcomm_sessions_task, NULL); /* RFCOMM sessions list */ LIST_INIT(&ng_btsocket_rfcomm_sessions); mtx_init(&ng_btsocket_rfcomm_sessions_mtx, "btsocks_rfcomm_sessions_mtx", NULL, MTX_DEF); /* RFCOMM sockets list */ LIST_INIT(&ng_btsocket_rfcomm_sockets); mtx_init(&ng_btsocket_rfcomm_sockets_mtx, "btsocks_rfcomm_sockets_mtx", NULL, MTX_DEF); } /* ng_btsocket_rfcomm_init */ SYSINIT(ng_btsocket_rfcomm_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ng_btsocket_rfcomm_init, NULL); /* * Abort connection on socket */ void ng_btsocket_rfcomm_abort(struct socket *so) { so->so_error = ECONNABORTED; (void)ng_btsocket_rfcomm_disconnect(so); } /* ng_btsocket_rfcomm_abort */ void ng_btsocket_rfcomm_close(struct socket *so) { (void)ng_btsocket_rfcomm_disconnect(so); } /* ng_btsocket_rfcomm_close */ /* * Create and attach new socket */ int ng_btsocket_rfcomm_attach(struct socket *so, int proto, struct thread *td) { ng_btsocket_rfcomm_pcb_p pcb = so2rfcomm_pcb(so); int error; /* Check socket and protocol */ if (so->so_type != SOCK_STREAM) return (ESOCKTNOSUPPORT); #if 0 /* XXX sonewconn() calls "pru_attach" with proto == 0 */ if (proto != 0) if (proto != BLUETOOTH_PROTO_RFCOMM) return (EPROTONOSUPPORT); #endif /* XXX */ if (pcb != NULL) return (EISCONN); /* Reserve send and receive space if it is not reserved yet */ if ((so->so_snd.sb_hiwat == 0) || (so->so_rcv.sb_hiwat == 0)) { error = soreserve(so, NG_BTSOCKET_RFCOMM_SENDSPACE, NG_BTSOCKET_RFCOMM_RECVSPACE); if (error != 0) return (error); } /* Allocate the PCB */ pcb = malloc(sizeof(*pcb), M_NETGRAPH_BTSOCKET_RFCOMM, M_NOWAIT | M_ZERO); if (pcb == NULL) return (ENOMEM); /* Link the PCB and the socket */ so->so_pcb = (caddr_t) pcb; pcb->so = so; /* Initialize PCB */ pcb->state = NG_BTSOCKET_RFCOMM_DLC_CLOSED; pcb->flags = NG_BTSOCKET_RFCOMM_DLC_CFC; pcb->lmodem = pcb->rmodem = (RFCOMM_MODEM_RTC | RFCOMM_MODEM_RTR | RFCOMM_MODEM_DV); pcb->mtu = RFCOMM_DEFAULT_MTU; pcb->tx_cred = 0; pcb->rx_cred = RFCOMM_DEFAULT_CREDITS; mtx_init(&pcb->pcb_mtx, "btsocks_rfcomm_pcb_mtx", NULL, MTX_DEF); callout_init_mtx(&pcb->timo, &pcb->pcb_mtx, 0); /* Add the PCB to the list */ mtx_lock(&ng_btsocket_rfcomm_sockets_mtx); LIST_INSERT_HEAD(&ng_btsocket_rfcomm_sockets, pcb, next); mtx_unlock(&ng_btsocket_rfcomm_sockets_mtx); return (0); } /* ng_btsocket_rfcomm_attach */ /* * Bind socket */ int ng_btsocket_rfcomm_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_rfcomm_pcb_t *pcb = so2rfcomm_pcb(so), *pcb1; struct sockaddr_rfcomm *sa = (struct sockaddr_rfcomm *) nam; if (pcb == NULL) return (EINVAL); /* Verify address */ if (sa == NULL) return (EINVAL); if (sa->rfcomm_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if (sa->rfcomm_len != 
sizeof(*sa)) return (EINVAL); if (sa->rfcomm_channel > 30) return (EINVAL); mtx_lock(&pcb->pcb_mtx); if (sa->rfcomm_channel != 0) { mtx_lock(&ng_btsocket_rfcomm_sockets_mtx); LIST_FOREACH(pcb1, &ng_btsocket_rfcomm_sockets, next) { if (pcb1->channel == sa->rfcomm_channel && bcmp(&pcb1->src, &sa->rfcomm_bdaddr, sizeof(pcb1->src)) == 0) { mtx_unlock(&ng_btsocket_rfcomm_sockets_mtx); mtx_unlock(&pcb->pcb_mtx); return (EADDRINUSE); } } mtx_unlock(&ng_btsocket_rfcomm_sockets_mtx); } bcopy(&sa->rfcomm_bdaddr, &pcb->src, sizeof(pcb->src)); pcb->channel = sa->rfcomm_channel; mtx_unlock(&pcb->pcb_mtx); return (0); } /* ng_btsocket_rfcomm_bind */ /* * Connect socket */ int ng_btsocket_rfcomm_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_rfcomm_pcb_t *pcb = so2rfcomm_pcb(so); struct sockaddr_rfcomm *sa = (struct sockaddr_rfcomm *) nam; ng_btsocket_rfcomm_session_t *s = NULL; struct socket *l2so = NULL; int dlci, error = 0; if (pcb == NULL) return (EINVAL); /* Verify address */ if (sa == NULL) return (EINVAL); if (sa->rfcomm_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if (sa->rfcomm_len != sizeof(*sa)) return (EINVAL); if (sa->rfcomm_channel > 30) return (EINVAL); if (sa->rfcomm_channel == 0 || bcmp(&sa->rfcomm_bdaddr, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) return (EDESTADDRREQ); /* * Note that we will not check for errors in socreate() because * if we failed to create L2CAP socket at this point we still * might have already open session. */ error = socreate(PF_BLUETOOTH, &l2so, SOCK_SEQPACKET, BLUETOOTH_PROTO_L2CAP, td->td_ucred, td); /* * Look for session between "pcb->src" and "sa->rfcomm_bdaddr" (dst) */ mtx_lock(&ng_btsocket_rfcomm_sessions_mtx); s = ng_btsocket_rfcomm_session_by_addr(&pcb->src, &sa->rfcomm_bdaddr); if (s == NULL) { /* * We need to create new RFCOMM session. Check if we have L2CAP * socket. 
If l2so == NULL then error has the error code from * socreate() */ if (l2so == NULL) { mtx_unlock(&ng_btsocket_rfcomm_sessions_mtx); return (error); } error = ng_btsocket_rfcomm_session_create(&s, l2so, &pcb->src, &sa->rfcomm_bdaddr, td); if (error != 0) { mtx_unlock(&ng_btsocket_rfcomm_sessions_mtx); soclose(l2so); return (error); } } else if (l2so != NULL) soclose(l2so); /* we don't need new L2CAP socket */ /* * Check if we already have the same DLCI the same session */ mtx_lock(&s->session_mtx); mtx_lock(&pcb->pcb_mtx); dlci = RFCOMM_MKDLCI(!INITIATOR(s), sa->rfcomm_channel); if (ng_btsocket_rfcomm_pcb_by_dlci(s, dlci) != NULL) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&s->session_mtx); mtx_unlock(&ng_btsocket_rfcomm_sessions_mtx); return (EBUSY); } /* * Check session state and if its not acceptable then refuse connection */ switch (s->state) { case NG_BTSOCKET_RFCOMM_SESSION_CONNECTING: case NG_BTSOCKET_RFCOMM_SESSION_CONNECTED: case NG_BTSOCKET_RFCOMM_SESSION_OPEN: /* * Update destination address and channel and attach * DLC to the session */ bcopy(&sa->rfcomm_bdaddr, &pcb->dst, sizeof(pcb->dst)); pcb->channel = sa->rfcomm_channel; pcb->dlci = dlci; LIST_INSERT_HEAD(&s->dlcs, pcb, session_next); pcb->session = s; ng_btsocket_rfcomm_timeout(pcb); soisconnecting(pcb->so); if (s->state == NG_BTSOCKET_RFCOMM_SESSION_OPEN) { pcb->mtu = s->mtu; bcopy(&so2l2cap_pcb(s->l2so)->src, &pcb->src, sizeof(pcb->src)); pcb->state = NG_BTSOCKET_RFCOMM_DLC_CONFIGURING; error = ng_btsocket_rfcomm_send_pn(pcb); if (error == 0) error = ng_btsocket_rfcomm_task_wakeup(); } else pcb->state = NG_BTSOCKET_RFCOMM_DLC_W4_CONNECT; break; default: error = ECONNRESET; break; } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&s->session_mtx); mtx_unlock(&ng_btsocket_rfcomm_sessions_mtx); return (error); } /* ng_btsocket_rfcomm_connect */ /* * Process ioctl's calls on socket. * XXX FIXME this should provide interface to the RFCOMM multiplexor channel */ int ng_btsocket_rfcomm_control(struct socket *so, u_long cmd, void *data, struct ifnet *ifp, struct thread *td) { return (EINVAL); } /* ng_btsocket_rfcomm_control */ /* * Process getsockopt/setsockopt system calls */ int ng_btsocket_rfcomm_ctloutput(struct socket *so, struct sockopt *sopt) { ng_btsocket_rfcomm_pcb_p pcb = so2rfcomm_pcb(so); struct ng_btsocket_rfcomm_fc_info fcinfo; int error = 0; if (pcb == NULL) return (EINVAL); if (sopt->sopt_level != SOL_RFCOMM) return (0); mtx_lock(&pcb->pcb_mtx); switch (sopt->sopt_dir) { case SOPT_GET: switch (sopt->sopt_name) { case SO_RFCOMM_MTU: error = sooptcopyout(sopt, &pcb->mtu, sizeof(pcb->mtu)); break; case SO_RFCOMM_FC_INFO: fcinfo.lmodem = pcb->lmodem; fcinfo.rmodem = pcb->rmodem; fcinfo.tx_cred = pcb->tx_cred; fcinfo.rx_cred = pcb->rx_cred; fcinfo.cfc = (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC)? 
1 : 0; fcinfo.reserved = 0; error = sooptcopyout(sopt, &fcinfo, sizeof(fcinfo)); break; default: error = ENOPROTOOPT; break; } break; case SOPT_SET: switch (sopt->sopt_name) { default: error = ENOPROTOOPT; break; } break; default: error = EINVAL; break; } mtx_unlock(&pcb->pcb_mtx); return (error); } /* ng_btsocket_rfcomm_ctloutput */ /* * Detach and destroy socket */ void ng_btsocket_rfcomm_detach(struct socket *so) { ng_btsocket_rfcomm_pcb_p pcb = so2rfcomm_pcb(so); KASSERT(pcb != NULL, ("ng_btsocket_rfcomm_detach: pcb == NULL")); mtx_lock(&pcb->pcb_mtx); switch (pcb->state) { case NG_BTSOCKET_RFCOMM_DLC_W4_CONNECT: case NG_BTSOCKET_RFCOMM_DLC_CONFIGURING: case NG_BTSOCKET_RFCOMM_DLC_CONNECTING: case NG_BTSOCKET_RFCOMM_DLC_CONNECTED: /* XXX What to do with pending request? */ if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMO) ng_btsocket_rfcomm_untimeout(pcb); if (pcb->state == NG_BTSOCKET_RFCOMM_DLC_W4_CONNECT) pcb->flags |= NG_BTSOCKET_RFCOMM_DLC_DETACHED; else pcb->state = NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING; ng_btsocket_rfcomm_task_wakeup(); break; case NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING: ng_btsocket_rfcomm_task_wakeup(); break; } while (pcb->state != NG_BTSOCKET_RFCOMM_DLC_CLOSED) msleep(&pcb->state, &pcb->pcb_mtx, PZERO, "rf_det", 0); if (pcb->session != NULL) panic("%s: pcb->session != NULL\n", __func__); if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMO) panic("%s: timeout on closed DLC, flags=%#x\n", __func__, pcb->flags); mtx_lock(&ng_btsocket_rfcomm_sockets_mtx); LIST_REMOVE(pcb, next); mtx_unlock(&ng_btsocket_rfcomm_sockets_mtx); mtx_unlock(&pcb->pcb_mtx); mtx_destroy(&pcb->pcb_mtx); bzero(pcb, sizeof(*pcb)); free(pcb, M_NETGRAPH_BTSOCKET_RFCOMM); soisdisconnected(so); so->so_pcb = NULL; } /* ng_btsocket_rfcomm_detach */ /* * Disconnect socket */ int ng_btsocket_rfcomm_disconnect(struct socket *so) { ng_btsocket_rfcomm_pcb_p pcb = so2rfcomm_pcb(so); if (pcb == NULL) return (EINVAL); mtx_lock(&pcb->pcb_mtx); if (pcb->state == NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING) { mtx_unlock(&pcb->pcb_mtx); return (EINPROGRESS); } /* XXX What to do with pending request? */ if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMO) ng_btsocket_rfcomm_untimeout(pcb); switch (pcb->state) { case NG_BTSOCKET_RFCOMM_DLC_CONFIGURING: /* XXX can we get here? */ case NG_BTSOCKET_RFCOMM_DLC_CONNECTING: /* XXX can we get here? */ case NG_BTSOCKET_RFCOMM_DLC_CONNECTED: /* * Just change DLC state and enqueue RFCOMM task. It will * queue and send DISC on the DLC. */ pcb->state = NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING; soisdisconnecting(so); ng_btsocket_rfcomm_task_wakeup(); break; case NG_BTSOCKET_RFCOMM_DLC_CLOSED: case NG_BTSOCKET_RFCOMM_DLC_W4_CONNECT: break; default: panic("%s: Invalid DLC state=%d, flags=%#x\n", __func__, pcb->state, pcb->flags); break; } mtx_unlock(&pcb->pcb_mtx); return (0); } /* ng_btsocket_rfcomm_disconnect */ /* * Listen on socket. 
First call to listen() will create listening RFCOMM session */ int ng_btsocket_rfcomm_listen(struct socket *so, int backlog, struct thread *td) { ng_btsocket_rfcomm_pcb_p pcb = so2rfcomm_pcb(so), pcb1; ng_btsocket_rfcomm_session_p s = NULL; struct socket *l2so = NULL; int error, socreate_error, usedchannels; if (pcb == NULL) return (EINVAL); if (pcb->channel > 30) return (EADDRNOTAVAIL); usedchannels = 0; mtx_lock(&pcb->pcb_mtx); if (pcb->channel == 0) { mtx_lock(&ng_btsocket_rfcomm_sockets_mtx); LIST_FOREACH(pcb1, &ng_btsocket_rfcomm_sockets, next) if (pcb1->channel != 0 && bcmp(&pcb1->src, &pcb->src, sizeof(pcb->src)) == 0) usedchannels |= (1 << (pcb1->channel - 1)); for (pcb->channel = 30; pcb->channel > 0; pcb->channel --) if (!(usedchannels & (1 << (pcb->channel - 1)))) break; if (pcb->channel == 0) { mtx_unlock(&ng_btsocket_rfcomm_sockets_mtx); mtx_unlock(&pcb->pcb_mtx); return (EADDRNOTAVAIL); } mtx_unlock(&ng_btsocket_rfcomm_sockets_mtx); } mtx_unlock(&pcb->pcb_mtx); /* * Note that we will not check for errors in socreate() because * if we failed to create L2CAP socket at this point we still * might have already open session. */ socreate_error = socreate(PF_BLUETOOTH, &l2so, SOCK_SEQPACKET, BLUETOOTH_PROTO_L2CAP, td->td_ucred, td); /* * Transition the socket and session into the LISTENING state. Check * for collisions first, as there can only be one. */ mtx_lock(&ng_btsocket_rfcomm_sessions_mtx); SOCK_LOCK(so); error = solisten_proto_check(so); SOCK_UNLOCK(so); if (error != 0) goto out; LIST_FOREACH(s, &ng_btsocket_rfcomm_sessions, next) if (s->state == NG_BTSOCKET_RFCOMM_SESSION_LISTENING) break; if (s == NULL) { /* * We need to create default RFCOMM session. Check if we have * L2CAP socket. If l2so == NULL then error has the error code * from socreate() */ if (l2so == NULL) { solisten_proto_abort(so); error = socreate_error; goto out; } /* * Create default listen RFCOMM session. The default RFCOMM * session will listen on ANY address. * * XXX FIXME Note that currently there is no way to adjust MTU * for the default session. */ error = ng_btsocket_rfcomm_session_create(&s, l2so, NG_HCI_BDADDR_ANY, NULL, td); if (error != 0) { solisten_proto_abort(so); goto out; } l2so = NULL; } SOCK_LOCK(so); solisten_proto(so, backlog); SOCK_UNLOCK(so); out: mtx_unlock(&ng_btsocket_rfcomm_sessions_mtx); /* * If we still have an l2so reference here, it's unneeded, so release * it. */ if (l2so != NULL) soclose(l2so); return (error); } /* ng_btsocket_listen */ -static int -ng_btsocket_rfcomm_peeraddr1(struct socket *so, struct sockaddr_rfcomm *sa) +/* + * Return peer address for getpeername(2) or for accept(2). For the latter + * case no extra work to do here, socket must be connected and ready. 
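From userland the conversion is invisible: getpeername(2) and getsockname(2) still fill a caller-provided sockaddr. A rough sketch of a client that ends up in these routines follows; the peer address, the channel, and the <bluetooth.h>/bt_aton() names are assumptions recalled from libbluetooth, and error handling is omitted.

#include <sys/types.h>
#include <sys/socket.h>
#include <bluetooth.h>	/* bdaddr_t, bt_aton() and the RFCOMM definitions */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct sockaddr_rfcomm sa = {
		.rfcomm_len = sizeof(sa),
		.rfcomm_family = AF_BLUETOOTH,
		.rfcomm_channel = 1,		/* hypothetical server channel */
	};
	struct sockaddr_rfcomm peer;
	socklen_t plen = sizeof(peer), mlen = sizeof(uint16_t);
	uint16_t mtu;
	int s;

	bt_aton("00:11:22:33:44:55", &sa.rfcomm_bdaddr);	/* made-up peer */

	s = socket(PF_BLUETOOTH, SOCK_STREAM, BLUETOOTH_PROTO_RFCOMM);
	connect(s, (struct sockaddr *)&sa, sizeof(sa));

	/* Served by ng_btsocket_rfcomm_ctloutput() above. */
	getsockopt(s, SOL_RFCOMM, SO_RFCOMM_MTU, &mtu, &mlen);

	/* Ends up in ng_btsocket_rfcomm_peeraddr(), which now fills "peer". */
	getpeername(s, (struct sockaddr *)&peer, &plen);
	printf("connected to channel %d, mtu %u\n", peer.rfcomm_channel, mtu);
	return (0);
}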
+ */ +int +ng_btsocket_rfcomm_peeraddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_rfcomm_pcb_p pcb = so2rfcomm_pcb(so); + struct sockaddr_rfcomm *rfcomm = (struct sockaddr_rfcomm *)sa; if (pcb == NULL) return (EINVAL); - *sa = (struct sockaddr_rfcomm ){ + *rfcomm = (struct sockaddr_rfcomm ){ .rfcomm_len = sizeof(struct sockaddr_rfcomm), .rfcomm_family = AF_BLUETOOTH, .rfcomm_channel = pcb->channel, }; - bcopy(&pcb->dst, &sa->rfcomm_bdaddr, sizeof(sa->rfcomm_bdaddr)); + bcopy(&pcb->dst, &rfcomm->rfcomm_bdaddr, sizeof(rfcomm->rfcomm_bdaddr)); return (0); } -/* - * Get peer address - */ -int -ng_btsocket_rfcomm_peeraddr(struct socket *so, struct sockaddr **nam) -{ - struct sockaddr_rfcomm sa; - int error; - - error = ng_btsocket_rfcomm_peeraddr1(so, &sa); - if (error != 0) - return (error); - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); - - return ((*nam == NULL)? ENOMEM : 0); -} - -/* - * Accept connection on socket. Nothing to do here, socket must be connected - * and ready, so just return peer address and be done with it. - */ -int -ng_btsocket_rfcomm_accept(struct socket *so, struct sockaddr *sa) -{ - - return (ng_btsocket_rfcomm_peeraddr1(so, (struct sockaddr_rfcomm *)sa)); -} - /* * Send data to socket */ int ng_btsocket_rfcomm_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { ng_btsocket_rfcomm_pcb_t *pcb = so2rfcomm_pcb(so); int error = 0; /* Check socket and input */ if (pcb == NULL || m == NULL || control != NULL) { error = EINVAL; goto drop; } mtx_lock(&pcb->pcb_mtx); /* Make sure DLC is connected */ if (pcb->state != NG_BTSOCKET_RFCOMM_DLC_CONNECTED) { mtx_unlock(&pcb->pcb_mtx); error = ENOTCONN; goto drop; } /* Put the packet on the socket's send queue and wakeup RFCOMM task */ sbappend(&pcb->so->so_snd, m, flags); m = NULL; if (!(pcb->flags & NG_BTSOCKET_RFCOMM_DLC_SENDING)) { pcb->flags |= NG_BTSOCKET_RFCOMM_DLC_SENDING; error = ng_btsocket_rfcomm_task_wakeup(); } mtx_unlock(&pcb->pcb_mtx); drop: NG_FREE_M(m); /* checks for != NULL */ NG_FREE_M(control); return (error); } /* ng_btsocket_rfcomm_send */ /* * Get socket address */ int -ng_btsocket_rfcomm_sockaddr(struct socket *so, struct sockaddr **nam) +ng_btsocket_rfcomm_sockaddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_rfcomm_pcb_p pcb = so2rfcomm_pcb(so); - struct sockaddr_rfcomm sa; + struct sockaddr_rfcomm *rfcomm = (struct sockaddr_rfcomm *)sa; if (pcb == NULL) return (EINVAL); - bcopy(&pcb->src, &sa.rfcomm_bdaddr, sizeof(sa.rfcomm_bdaddr)); - sa.rfcomm_channel = pcb->channel; - sa.rfcomm_len = sizeof(sa); - sa.rfcomm_family = AF_BLUETOOTH; - - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); + *rfcomm = (struct sockaddr_rfcomm ){ + .rfcomm_len = sizeof(struct sockaddr_rfcomm), + .rfcomm_family = AF_BLUETOOTH, + .rfcomm_channel = pcb->channel, + }; + bcopy(&pcb->src, &rfcomm->rfcomm_bdaddr, sizeof(rfcomm->rfcomm_bdaddr)); - return ((*nam == NULL)? ENOMEM : 0); -} /* ng_btsocket_rfcomm_sockaddr */ + return (0); +} /* * Upcall function for L2CAP sockets. Enqueue RFCOMM task. */ static int ng_btsocket_rfcomm_upcall(struct socket *so, void *arg, int waitflag) { int error; if (so == NULL) panic("%s: so == NULL\n", __func__); if ((error = ng_btsocket_rfcomm_task_wakeup()) != 0) NG_BTSOCKET_RFCOMM_ALERT( "%s: Could not enqueue RFCOMM task, error=%d\n", __func__, error); return (SU_OK); } /* ng_btsocket_rfcomm_upcall */ /* * RFCOMM task. Will handle all RFCOMM sessions in one pass. 
* XXX FIXME does not scale very well */ static void ng_btsocket_rfcomm_sessions_task(void *ctx, int pending) { ng_btsocket_rfcomm_session_p s = NULL, s_next = NULL; mtx_lock(&ng_btsocket_rfcomm_sessions_mtx); for (s = LIST_FIRST(&ng_btsocket_rfcomm_sessions); s != NULL; ) { mtx_lock(&s->session_mtx); s_next = LIST_NEXT(s, next); ng_btsocket_rfcomm_session_task(s); if (s->state == NG_BTSOCKET_RFCOMM_SESSION_CLOSED) { /* Unlink and clean the session */ LIST_REMOVE(s, next); NG_BT_MBUFQ_DRAIN(&s->outq); if (!LIST_EMPTY(&s->dlcs)) panic("%s: DLC list is not empty\n", __func__); /* Close L2CAP socket */ SOCKBUF_LOCK(&s->l2so->so_rcv); soupcall_clear(s->l2so, SO_RCV); SOCKBUF_UNLOCK(&s->l2so->so_rcv); SOCKBUF_LOCK(&s->l2so->so_snd); soupcall_clear(s->l2so, SO_SND); SOCKBUF_UNLOCK(&s->l2so->so_snd); soclose(s->l2so); mtx_unlock(&s->session_mtx); mtx_destroy(&s->session_mtx); bzero(s, sizeof(*s)); free(s, M_NETGRAPH_BTSOCKET_RFCOMM); } else mtx_unlock(&s->session_mtx); s = s_next; } mtx_unlock(&ng_btsocket_rfcomm_sessions_mtx); } /* ng_btsocket_rfcomm_sessions_task */ /* * Process RFCOMM session. Will handle all RFCOMM sockets in one pass. */ static void ng_btsocket_rfcomm_session_task(ng_btsocket_rfcomm_session_p s) { mtx_assert(&s->session_mtx, MA_OWNED); if (s->l2so->so_rcv.sb_state & SBS_CANTRCVMORE) { NG_BTSOCKET_RFCOMM_INFO( "%s: L2CAP connection has been terminated, so=%p, so_state=%#x, so_count=%d, " \ "state=%d, flags=%#x\n", __func__, s->l2so, s->l2so->so_state, s->l2so->so_count, s->state, s->flags); s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; ng_btsocket_rfcomm_session_clean(s); } /* Now process upcall */ switch (s->state) { /* Try to accept new L2CAP connection(s) */ case NG_BTSOCKET_RFCOMM_SESSION_LISTENING: while (ng_btsocket_rfcomm_session_accept(s) == 0) ; break; /* Process the results of the L2CAP connect */ case NG_BTSOCKET_RFCOMM_SESSION_CONNECTING: ng_btsocket_rfcomm_session_process_pcb(s); if (ng_btsocket_rfcomm_session_connect(s) != 0) { s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; ng_btsocket_rfcomm_session_clean(s); } break; /* Try to receive/send more data */ case NG_BTSOCKET_RFCOMM_SESSION_CONNECTED: case NG_BTSOCKET_RFCOMM_SESSION_OPEN: case NG_BTSOCKET_RFCOMM_SESSION_DISCONNECTING: ng_btsocket_rfcomm_session_process_pcb(s); if (ng_btsocket_rfcomm_session_receive(s) != 0) { s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; ng_btsocket_rfcomm_session_clean(s); } else if (ng_btsocket_rfcomm_session_send(s) != 0) { s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; ng_btsocket_rfcomm_session_clean(s); } break; case NG_BTSOCKET_RFCOMM_SESSION_CLOSED: break; default: panic("%s: Invalid session state=%d, flags=%#x\n", __func__, s->state, s->flags); break; } } /* ng_btsocket_rfcomm_session_task */ /* * Process RFCOMM connection indicator. Caller must hold s->session_mtx */ static ng_btsocket_rfcomm_pcb_p ng_btsocket_rfcomm_connect_ind(ng_btsocket_rfcomm_session_p s, int channel) { ng_btsocket_rfcomm_pcb_p pcb = NULL, pcb1 = NULL; ng_btsocket_l2cap_pcb_p l2pcb = NULL; struct socket *so1; mtx_assert(&s->session_mtx, MA_OWNED); /* * Try to find RFCOMM socket that listens on given source address * and channel. This will return the best possible match. */ l2pcb = so2l2cap_pcb(s->l2so); pcb = ng_btsocket_rfcomm_pcb_listener(&l2pcb->src, channel); if (pcb == NULL) return (NULL); /* * Check the pending connections queue and if we have space then * create new socket and set proper source and destination address, * and channel. 
*/ mtx_lock(&pcb->pcb_mtx); CURVNET_SET(pcb->so->so_vnet); so1 = sonewconn(pcb->so, 0); CURVNET_RESTORE(); mtx_unlock(&pcb->pcb_mtx); if (so1 == NULL) return (NULL); /* * If we got here than we have created new socket. So complete the * connection. Set source and destination address from the session. */ pcb1 = so2rfcomm_pcb(so1); if (pcb1 == NULL) panic("%s: pcb1 == NULL\n", __func__); mtx_lock(&pcb1->pcb_mtx); bcopy(&l2pcb->src, &pcb1->src, sizeof(pcb1->src)); bcopy(&l2pcb->dst, &pcb1->dst, sizeof(pcb1->dst)); pcb1->channel = channel; /* Link new DLC to the session. We already hold s->session_mtx */ LIST_INSERT_HEAD(&s->dlcs, pcb1, session_next); pcb1->session = s; mtx_unlock(&pcb1->pcb_mtx); return (pcb1); } /* ng_btsocket_rfcomm_connect_ind */ /* * Process RFCOMM connect confirmation. Caller must hold s->session_mtx. */ static void ng_btsocket_rfcomm_connect_cfm(ng_btsocket_rfcomm_session_p s) { ng_btsocket_rfcomm_pcb_p pcb = NULL, pcb_next = NULL; int error; mtx_assert(&s->session_mtx, MA_OWNED); /* * Wake up all waiting sockets and send PN request for each of them. * Note that timeout already been set in ng_btsocket_rfcomm_connect() * * Note: cannot use LIST_FOREACH because ng_btsocket_rfcomm_pcb_kill * will unlink DLC from the session */ for (pcb = LIST_FIRST(&s->dlcs); pcb != NULL; ) { mtx_lock(&pcb->pcb_mtx); pcb_next = LIST_NEXT(pcb, session_next); if (pcb->state == NG_BTSOCKET_RFCOMM_DLC_W4_CONNECT) { pcb->mtu = s->mtu; bcopy(&so2l2cap_pcb(s->l2so)->src, &pcb->src, sizeof(pcb->src)); error = ng_btsocket_rfcomm_send_pn(pcb); if (error == 0) pcb->state = NG_BTSOCKET_RFCOMM_DLC_CONFIGURING; else ng_btsocket_rfcomm_pcb_kill(pcb, error); } mtx_unlock(&pcb->pcb_mtx); pcb = pcb_next; } } /* ng_btsocket_rfcomm_connect_cfm */ /***************************************************************************** ***************************************************************************** ** RFCOMM sessions ***************************************************************************** *****************************************************************************/ /* * Create new RFCOMM session. That function WILL NOT take ownership over l2so. * Caller MUST free l2so if function failed. */ static int ng_btsocket_rfcomm_session_create(ng_btsocket_rfcomm_session_p *sp, struct socket *l2so, bdaddr_p src, bdaddr_p dst, struct thread *td) { ng_btsocket_rfcomm_session_p s = NULL; struct sockaddr_l2cap l2sa; struct sockopt l2sopt; int error; u_int16_t mtu; mtx_assert(&ng_btsocket_rfcomm_sessions_mtx, MA_OWNED); /* Allocate the RFCOMM session */ s = malloc(sizeof(*s), M_NETGRAPH_BTSOCKET_RFCOMM, M_NOWAIT | M_ZERO); if (s == NULL) return (ENOMEM); /* Set defaults */ s->mtu = RFCOMM_DEFAULT_MTU; s->flags = 0; s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; NG_BT_MBUFQ_INIT(&s->outq, ifqmaxlen); /* * XXX Mark session mutex as DUPOK to prevent "duplicated lock of * the same type" message. When accepting new L2CAP connection * ng_btsocket_rfcomm_session_accept() holds both session mutexes * for "old" (accepting) session and "new" (created) session. 
*/ mtx_init(&s->session_mtx, "btsocks_rfcomm_session_mtx", NULL, MTX_DEF|MTX_DUPOK); LIST_INIT(&s->dlcs); /* Prepare L2CAP socket */ SOCKBUF_LOCK(&l2so->so_rcv); soupcall_set(l2so, SO_RCV, ng_btsocket_rfcomm_upcall, NULL); SOCKBUF_UNLOCK(&l2so->so_rcv); SOCKBUF_LOCK(&l2so->so_snd); soupcall_set(l2so, SO_SND, ng_btsocket_rfcomm_upcall, NULL); SOCKBUF_UNLOCK(&l2so->so_snd); l2so->so_state |= SS_NBIO; s->l2so = l2so; mtx_lock(&s->session_mtx); /* * "src" == NULL and "dst" == NULL means just create session. * caller must do the rest */ if (src == NULL && dst == NULL) goto done; /* * Set incoming MTU on L2CAP socket. It is RFCOMM session default MTU * plus 5 bytes: RFCOMM frame header, one extra byte for length and one * extra byte for credits. */ mtu = s->mtu + sizeof(struct rfcomm_frame_hdr) + 1 + 1; l2sopt.sopt_dir = SOPT_SET; l2sopt.sopt_level = SOL_L2CAP; l2sopt.sopt_name = SO_L2CAP_IMTU; l2sopt.sopt_val = (void *) &mtu; l2sopt.sopt_valsize = sizeof(mtu); l2sopt.sopt_td = NULL; error = sosetopt(s->l2so, &l2sopt); if (error != 0) goto bad; /* Bind socket to "src" address */ l2sa.l2cap_len = sizeof(l2sa); l2sa.l2cap_family = AF_BLUETOOTH; l2sa.l2cap_psm = (dst == NULL)? htole16(NG_L2CAP_PSM_RFCOMM) : 0; bcopy(src, &l2sa.l2cap_bdaddr, sizeof(l2sa.l2cap_bdaddr)); l2sa.l2cap_cid = 0; l2sa.l2cap_bdaddr_type = BDADDR_BREDR; error = sobind(s->l2so, (struct sockaddr *) &l2sa, td); if (error != 0) goto bad; /* If "dst" is not NULL then initiate connect(), otherwise listen() */ if (dst == NULL) { s->flags = 0; s->state = NG_BTSOCKET_RFCOMM_SESSION_LISTENING; error = solisten(s->l2so, 10, td); if (error != 0) goto bad; } else { s->flags = NG_BTSOCKET_RFCOMM_SESSION_INITIATOR; s->state = NG_BTSOCKET_RFCOMM_SESSION_CONNECTING; l2sa.l2cap_len = sizeof(l2sa); l2sa.l2cap_family = AF_BLUETOOTH; l2sa.l2cap_psm = htole16(NG_L2CAP_PSM_RFCOMM); bcopy(dst, &l2sa.l2cap_bdaddr, sizeof(l2sa.l2cap_bdaddr)); l2sa.l2cap_cid = 0; l2sa.l2cap_bdaddr_type = BDADDR_BREDR; error = soconnect(s->l2so, (struct sockaddr *) &l2sa, td); if (error != 0) goto bad; } done: LIST_INSERT_HEAD(&ng_btsocket_rfcomm_sessions, s, next); *sp = s; mtx_unlock(&s->session_mtx); return (0); bad: mtx_unlock(&s->session_mtx); /* Return L2CAP socket back to its original state */ SOCKBUF_LOCK(&l2so->so_rcv); soupcall_clear(s->l2so, SO_RCV); SOCKBUF_UNLOCK(&l2so->so_rcv); SOCKBUF_LOCK(&l2so->so_snd); soupcall_clear(s->l2so, SO_SND); SOCKBUF_UNLOCK(&l2so->so_snd); l2so->so_state &= ~SS_NBIO; mtx_destroy(&s->session_mtx); bzero(s, sizeof(*s)); free(s, M_NETGRAPH_BTSOCKET_RFCOMM); return (error); } /* ng_btsocket_rfcomm_session_create */ /* * Process accept() on RFCOMM session * XXX FIXME locking for "l2so"? 
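Both ng_btsocket_rfcomm_session_create() above and ng_btsocket_rfcomm_session_accept() below account for the same 5 bytes of RFCOMM framing when converting between the RFCOMM session MTU and the L2CAP MTU. A tiny worked sketch of that arithmetic, assuming the spec-default N1 of 127 for RFCOMM_DEFAULT_MTU and a 3-byte struct rfcomm_frame_hdr:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint16_t rfcomm_mtu = 127;	/* assumed RFCOMM_DEFAULT_MTU */
	const uint16_t hdr = 3;			/* address + control + length[0] */
	uint16_t l2cap_imtu;

	/* Outgoing setup: room for header, second length octet and credits. */
	l2cap_imtu = rfcomm_mtu + hdr + 1 + 1;
	assert(l2cap_imtu == 132);

	/* Incoming accept: derive the RFCOMM MTU back from the L2CAP MTU. */
	assert(l2cap_imtu - hdr - 1 - 1 == rfcomm_mtu);
	return (0);
}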
*/ static int ng_btsocket_rfcomm_session_accept(ng_btsocket_rfcomm_session_p s0) { struct socket *l2so; struct sockaddr_l2cap l2sa = { .l2cap_len = sizeof(l2sa) }; ng_btsocket_l2cap_pcb_t *l2pcb = NULL; ng_btsocket_rfcomm_session_p s = NULL; int error; mtx_assert(&ng_btsocket_rfcomm_sessions_mtx, MA_OWNED); mtx_assert(&s0->session_mtx, MA_OWNED); SOLISTEN_LOCK(s0->l2so); error = solisten_dequeue(s0->l2so, &l2so, 0); if (error == EWOULDBLOCK) return (error); if (error) { NG_BTSOCKET_RFCOMM_ERR( "%s: Could not accept connection on L2CAP socket, error=%d\n", __func__, error); return (error); } error = soaccept(l2so, (struct sockaddr *)&l2sa); if (error != 0) { NG_BTSOCKET_RFCOMM_ERR( "%s: soaccept() on L2CAP socket failed, error=%d\n", __func__, error); soclose(l2so); return (error); } /* * Check if there is already active RFCOMM session between two devices. * If so then close L2CAP connection. We only support one RFCOMM session * between each pair of devices. Note that here we assume session in any * state. The session even could be in the middle of disconnecting. */ l2pcb = so2l2cap_pcb(l2so); s = ng_btsocket_rfcomm_session_by_addr(&l2pcb->src, &l2pcb->dst); if (s == NULL) { /* Create a new RFCOMM session */ error = ng_btsocket_rfcomm_session_create(&s, l2so, NULL, NULL, curthread /* XXX */); if (error == 0) { mtx_lock(&s->session_mtx); s->flags = 0; s->state = NG_BTSOCKET_RFCOMM_SESSION_CONNECTED; /* * Adjust MTU on incoming connection. Reserve 5 bytes: * RFCOMM frame header, one extra byte for length and * one extra byte for credits. */ s->mtu = min(l2pcb->imtu, l2pcb->omtu) - sizeof(struct rfcomm_frame_hdr) - 1 - 1; mtx_unlock(&s->session_mtx); } else { NG_BTSOCKET_RFCOMM_ALERT( "%s: Failed to create new RFCOMM session, error=%d\n", __func__, error); soclose(l2so); } } else { NG_BTSOCKET_RFCOMM_WARN( "%s: Rejecting duplicating RFCOMM session between src=%x:%x:%x:%x:%x:%x and " \ "dst=%x:%x:%x:%x:%x:%x, state=%d, flags=%#x\n", __func__, l2pcb->src.b[5], l2pcb->src.b[4], l2pcb->src.b[3], l2pcb->src.b[2], l2pcb->src.b[1], l2pcb->src.b[0], l2pcb->dst.b[5], l2pcb->dst.b[4], l2pcb->dst.b[3], l2pcb->dst.b[2], l2pcb->dst.b[1], l2pcb->dst.b[0], s->state, s->flags); error = EBUSY; soclose(l2so); } return (error); } /* ng_btsocket_rfcomm_session_accept */ /* * Process connect() on RFCOMM session * XXX FIXME locking for "l2so"? */ static int ng_btsocket_rfcomm_session_connect(ng_btsocket_rfcomm_session_p s) { ng_btsocket_l2cap_pcb_p l2pcb = so2l2cap_pcb(s->l2so); int error; mtx_assert(&s->session_mtx, MA_OWNED); /* First check if connection has failed */ if ((error = s->l2so->so_error) != 0) { s->l2so->so_error = 0; NG_BTSOCKET_RFCOMM_ERR( "%s: Could not connect RFCOMM session, error=%d, state=%d, flags=%#x\n", __func__, error, s->state, s->flags); return (error); } /* Is connection still in progress? */ if (s->l2so->so_state & SS_ISCONNECTING) return (0); /* * If we got here then we are connected. Send SABM on DLCI 0 to * open multiplexor channel. */ if (error == 0) { s->state = NG_BTSOCKET_RFCOMM_SESSION_CONNECTED; /* * Adjust MTU on outgoing connection. Reserve 5 bytes: RFCOMM * frame header, one extra byte for length and one extra byte * for credits. */ s->mtu = min(l2pcb->imtu, l2pcb->omtu) - sizeof(struct rfcomm_frame_hdr) - 1 - 1; error = ng_btsocket_rfcomm_send_command(s,RFCOMM_FRAME_SABM,0); if (error == 0) error = ng_btsocket_rfcomm_task_wakeup(); } return (error); }/* ng_btsocket_rfcomm_session_connect */ /* * Receive data on RFCOMM session * XXX FIXME locking for "l2so"? 
*/ static int ng_btsocket_rfcomm_session_receive(ng_btsocket_rfcomm_session_p s) { struct mbuf *m = NULL; struct uio uio; int more, flags, error; mtx_assert(&s->session_mtx, MA_OWNED); /* Can we read from the L2CAP socket? */ if (!soreadable(s->l2so)) return (0); /* First check for error on L2CAP socket */ if ((error = s->l2so->so_error) != 0) { s->l2so->so_error = 0; NG_BTSOCKET_RFCOMM_ERR( "%s: Could not receive data from L2CAP socket, error=%d, state=%d, flags=%#x\n", __func__, error, s->state, s->flags); return (error); } /* * Read all packets from the L2CAP socket. * XXX FIXME/VERIFY is that correct? For now use m->m_nextpkt as * indication that there is more packets on the socket's buffer. * Also what should we use in uio.uio_resid? * May be s->mtu + sizeof(struct rfcomm_frame_hdr) + 1 + 1? */ for (more = 1; more; ) { /* Try to get next packet from socket */ bzero(&uio, sizeof(uio)); /* uio.uio_td = NULL; */ uio.uio_resid = 1000000000; flags = MSG_DONTWAIT; m = NULL; error = soreceive(s->l2so, NULL, &uio, &m, (struct mbuf **) NULL, &flags); if (error != 0) { if (error == EWOULDBLOCK) return (0); /* XXX can happen? */ NG_BTSOCKET_RFCOMM_ERR( "%s: Could not receive data from L2CAP socket, error=%d\n", __func__, error); return (error); } more = (m->m_nextpkt != NULL); m->m_nextpkt = NULL; ng_btsocket_rfcomm_receive_frame(s, m); } return (0); } /* ng_btsocket_rfcomm_session_receive */ /* * Send data on RFCOMM session * XXX FIXME locking for "l2so"? */ static int ng_btsocket_rfcomm_session_send(ng_btsocket_rfcomm_session_p s) { struct mbuf *m = NULL; int error; mtx_assert(&s->session_mtx, MA_OWNED); /* Send as much as we can from the session queue */ while (sowriteable(s->l2so)) { /* Check if socket still OK */ if ((error = s->l2so->so_error) != 0) { s->l2so->so_error = 0; NG_BTSOCKET_RFCOMM_ERR( "%s: Detected error=%d on L2CAP socket, state=%d, flags=%#x\n", __func__, error, s->state, s->flags); return (error); } NG_BT_MBUFQ_DEQUEUE(&s->outq, m); if (m == NULL) return (0); /* we are done */ /* Call send function on the L2CAP socket */ error = s->l2so->so_proto->pr_send(s->l2so, 0, m, NULL, NULL, curthread /* XXX */); if (error != 0) { NG_BTSOCKET_RFCOMM_ERR( "%s: Could not send data to L2CAP socket, error=%d\n", __func__, error); return (error); } } return (0); } /* ng_btsocket_rfcomm_session_send */ /* * Close and disconnect all DLCs for the given session. Caller must hold * s->sesson_mtx. Will wakeup session. */ static void ng_btsocket_rfcomm_session_clean(ng_btsocket_rfcomm_session_p s) { ng_btsocket_rfcomm_pcb_p pcb = NULL, pcb_next = NULL; int error; mtx_assert(&s->session_mtx, MA_OWNED); /* * Note: cannot use LIST_FOREACH because ng_btsocket_rfcomm_pcb_kill * will unlink DLC from the session */ for (pcb = LIST_FIRST(&s->dlcs); pcb != NULL; ) { mtx_lock(&pcb->pcb_mtx); pcb_next = LIST_NEXT(pcb, session_next); NG_BTSOCKET_RFCOMM_INFO( "%s: Disconnecting dlci=%d, state=%d, flags=%#x\n", __func__, pcb->dlci, pcb->state, pcb->flags); if (pcb->state == NG_BTSOCKET_RFCOMM_DLC_CONNECTED) error = ECONNRESET; else error = ECONNREFUSED; ng_btsocket_rfcomm_pcb_kill(pcb, error); mtx_unlock(&pcb->pcb_mtx); pcb = pcb_next; } } /* ng_btsocket_rfcomm_session_clean */ /* * Process all DLCs on the session. Caller MUST hold s->session_mtx. 
*/ static void ng_btsocket_rfcomm_session_process_pcb(ng_btsocket_rfcomm_session_p s) { ng_btsocket_rfcomm_pcb_p pcb = NULL, pcb_next = NULL; int error; mtx_assert(&s->session_mtx, MA_OWNED); /* * Note: cannot use LIST_FOREACH because ng_btsocket_rfcomm_pcb_kill * will unlink DLC from the session */ for (pcb = LIST_FIRST(&s->dlcs); pcb != NULL; ) { mtx_lock(&pcb->pcb_mtx); pcb_next = LIST_NEXT(pcb, session_next); switch (pcb->state) { /* * If DLC in W4_CONNECT state then we should check for both * timeout and detach. */ case NG_BTSOCKET_RFCOMM_DLC_W4_CONNECT: if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_DETACHED) ng_btsocket_rfcomm_pcb_kill(pcb, 0); else if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMEDOUT) ng_btsocket_rfcomm_pcb_kill(pcb, ETIMEDOUT); break; /* * If DLC in CONFIGURING or CONNECTING state then we only * should check for timeout. If detach() was called then * DLC will be moved into DISCONNECTING state. */ case NG_BTSOCKET_RFCOMM_DLC_CONFIGURING: case NG_BTSOCKET_RFCOMM_DLC_CONNECTING: if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMEDOUT) ng_btsocket_rfcomm_pcb_kill(pcb, ETIMEDOUT); break; /* * If DLC in CONNECTED state then we need to send data (if any) * from the socket's send queue. Note that we will send data * from either all sockets or none. This may overload session's * outgoing queue (but we do not check for that). * * XXX FIXME need scheduler for RFCOMM sockets */ case NG_BTSOCKET_RFCOMM_DLC_CONNECTED: error = ng_btsocket_rfcomm_pcb_send(pcb, ALOT); if (error != 0) ng_btsocket_rfcomm_pcb_kill(pcb, error); break; /* * If DLC in DISCONNECTING state then we must send DISC frame. * Note that if DLC has timeout set then we do not need to * resend DISC frame. * * XXX FIXME need to drain all data from the socket's queue * if LINGER option was set */ case NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING: if (!(pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMO)) { error = ng_btsocket_rfcomm_send_command( pcb->session, RFCOMM_FRAME_DISC, pcb->dlci); if (error == 0) ng_btsocket_rfcomm_timeout(pcb); else ng_btsocket_rfcomm_pcb_kill(pcb, error); } else if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMEDOUT) ng_btsocket_rfcomm_pcb_kill(pcb, ETIMEDOUT); break; /* case NG_BTSOCKET_RFCOMM_DLC_CLOSED: */ default: panic("%s: Invalid DLC state=%d, flags=%#x\n", __func__, pcb->state, pcb->flags); break; } mtx_unlock(&pcb->pcb_mtx); pcb = pcb_next; } } /* ng_btsocket_rfcomm_session_process_pcb */ /* * Find RFCOMM session between "src" and "dst". * Caller MUST hold ng_btsocket_rfcomm_sessions_mtx. */ static ng_btsocket_rfcomm_session_p ng_btsocket_rfcomm_session_by_addr(bdaddr_p src, bdaddr_p dst) { ng_btsocket_rfcomm_session_p s = NULL; ng_btsocket_l2cap_pcb_p l2pcb = NULL; int any_src; mtx_assert(&ng_btsocket_rfcomm_sessions_mtx, MA_OWNED); any_src = (bcmp(src, NG_HCI_BDADDR_ANY, sizeof(*src)) == 0); LIST_FOREACH(s, &ng_btsocket_rfcomm_sessions, next) { l2pcb = so2l2cap_pcb(s->l2so); if ((any_src || bcmp(&l2pcb->src, src, sizeof(*src)) == 0) && bcmp(&l2pcb->dst, dst, sizeof(*dst)) == 0) break; } return (s); } /* ng_btsocket_rfcomm_session_by_addr */ /***************************************************************************** ***************************************************************************** ** RFCOMM ***************************************************************************** *****************************************************************************/ /* * Process incoming RFCOMM frame. Caller must hold s->session_mtx. 
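The frame parser below relies on the TS 07.10 EA-bit length encoding: if bit 0 of the first length octet is set, the length is the remaining 7 bits; otherwise a second octet follows and the 15-bit value is stored little-endian. A small standalone decode sketch with made-up sample lengths; le16toh() is taken from <sys/endian.h> as in the kernel code (plain <endian.h> elsewhere):

#include <sys/endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns the number of length octets consumed, stores the payload length. */
static int
rfcomm_get_length(const uint8_t *p, uint16_t *length)
{
	uint16_t v;

	if (p[0] & 0x01) {			/* EA set: single octet */
		*length = p[0] >> 1;
		return (1);
	}
	memcpy(&v, p, sizeof(v));		/* EA clear: two octets */
	*length = le16toh(v) >> 1;
	return (2);
}

int
main(void)
{
	uint8_t short_len[] = { (23 << 1) | 1 };		/* 23 bytes */
	uint8_t long_len[] = { (300 << 1) & 0xff, 300 >> 7 };	/* 300 bytes */
	uint16_t len;

	rfcomm_get_length(short_len, &len);
	printf("short=%d\n", len);
	rfcomm_get_length(long_len, &len);
	printf("long=%d\n", len);
	return (0);
}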
* XXX FIXME check frame length */ static int ng_btsocket_rfcomm_receive_frame(ng_btsocket_rfcomm_session_p s, struct mbuf *m0) { struct rfcomm_frame_hdr *hdr = NULL; struct mbuf *m = NULL; u_int16_t length; u_int8_t dlci, type; int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); /* Pullup as much as we can into first mbuf (for direct access) */ length = min(m0->m_pkthdr.len, MHLEN); if (m0->m_len < length) { if ((m0 = m_pullup(m0, length)) == NULL) { NG_BTSOCKET_RFCOMM_ALERT( "%s: m_pullup(%d) failed\n", __func__, length); return (ENOBUFS); } } hdr = mtod(m0, struct rfcomm_frame_hdr *); dlci = RFCOMM_DLCI(hdr->address); type = RFCOMM_TYPE(hdr->control); /* Test EA bit in length. If not set then we have 2 bytes of length */ if (!RFCOMM_EA(hdr->length)) { bcopy(&hdr->length, &length, sizeof(length)); length = le16toh(length) >> 1; m_adj(m0, sizeof(*hdr) + 1); } else { length = hdr->length >> 1; m_adj(m0, sizeof(*hdr)); } NG_BTSOCKET_RFCOMM_INFO( "%s: Got frame type=%#x, dlci=%d, length=%d, cr=%d, pf=%d, len=%d\n", __func__, type, dlci, length, RFCOMM_CR(hdr->address), RFCOMM_PF(hdr->control), m0->m_pkthdr.len); /* * Get FCS (the last byte in the frame) * XXX this will not work if mbuf chain ends with empty mbuf. * XXX let's hope it never happens :) */ for (m = m0; m->m_next != NULL; m = m->m_next) ; if (m->m_len <= 0) panic("%s: Empty mbuf at the end of the chain, len=%d\n", __func__, m->m_len); /* * Check FCS. We only need to calculate FCS on first 2 or 3 bytes * and already m_pullup'ed mbuf chain, so it should be safe. */ if (ng_btsocket_rfcomm_check_fcs((u_int8_t *) hdr, type, m->m_data[m->m_len - 1])) { NG_BTSOCKET_RFCOMM_ERR( "%s: Invalid RFCOMM packet. Bad checksum\n", __func__); NG_FREE_M(m0); return (EINVAL); } m_adj(m0, -1); /* Trim FCS byte */ /* * Process RFCOMM frame. * * From TS 07.10 spec * * "... In the case where a SABM or DISC command with the P bit set * to 0 is received then the received frame shall be discarded..." * * "... If a unsolicited DM response is received then the frame shall * be processed irrespective of the P/F setting... " * * "... The station may transmit response frames with the F bit set * to 0 at any opportunity on an asynchronous basis. However, in the * case where a UA response is received with the F bit set to 0 then * the received frame shall be discarded..." * * From Bluetooth spec * * "... When credit based flow control is being used, the meaning of * the P/F bit in the control field of the RFCOMM header is redefined * for UIH frames..." */ switch (type) { case RFCOMM_FRAME_SABM: if (RFCOMM_PF(hdr->control)) error = ng_btsocket_rfcomm_receive_sabm(s, dlci); break; case RFCOMM_FRAME_DISC: if (RFCOMM_PF(hdr->control)) error = ng_btsocket_rfcomm_receive_disc(s, dlci); break; case RFCOMM_FRAME_UA: if (RFCOMM_PF(hdr->control)) error = ng_btsocket_rfcomm_receive_ua(s, dlci); break; case RFCOMM_FRAME_DM: error = ng_btsocket_rfcomm_receive_dm(s, dlci); break; case RFCOMM_FRAME_UIH: if (dlci == 0) error = ng_btsocket_rfcomm_receive_mcc(s, m0); else error = ng_btsocket_rfcomm_receive_uih(s, dlci, RFCOMM_PF(hdr->control), m0); return (error); /* NOT REACHED */ default: NG_BTSOCKET_RFCOMM_ERR( "%s: Invalid RFCOMM packet. 
Unknown type=%#x\n", __func__, type); error = EINVAL; break; } NG_FREE_M(m0); return (error); } /* ng_btsocket_rfcomm_receive_frame */ /* * Process RFCOMM SABM frame */ static int ng_btsocket_rfcomm_receive_sabm(ng_btsocket_rfcomm_session_p s, int dlci) { ng_btsocket_rfcomm_pcb_p pcb = NULL; int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got SABM, session state=%d, flags=%#x, mtu=%d, dlci=%d\n", __func__, s->state, s->flags, s->mtu, dlci); /* DLCI == 0 means open multiplexor channel */ if (dlci == 0) { switch (s->state) { case NG_BTSOCKET_RFCOMM_SESSION_CONNECTED: case NG_BTSOCKET_RFCOMM_SESSION_OPEN: error = ng_btsocket_rfcomm_send_command(s, RFCOMM_FRAME_UA, dlci); if (error == 0) { s->state = NG_BTSOCKET_RFCOMM_SESSION_OPEN; ng_btsocket_rfcomm_connect_cfm(s); } else { s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; ng_btsocket_rfcomm_session_clean(s); } break; default: NG_BTSOCKET_RFCOMM_WARN( "%s: Got SABM for session in invalid state state=%d, flags=%#x\n", __func__, s->state, s->flags); error = EINVAL; break; } return (error); } /* Make sure multiplexor channel is open */ if (s->state != NG_BTSOCKET_RFCOMM_SESSION_OPEN) { NG_BTSOCKET_RFCOMM_ERR( "%s: Got SABM for dlci=%d with multiplexor channel closed, state=%d, " \ "flags=%#x\n", __func__, dlci, s->state, s->flags); return (EINVAL); } /* * Check if we have this DLCI. This might happen when remote * peer uses PN command before actual open (SABM) happens. */ pcb = ng_btsocket_rfcomm_pcb_by_dlci(s, dlci); if (pcb != NULL) { mtx_lock(&pcb->pcb_mtx); if (pcb->state != NG_BTSOCKET_RFCOMM_DLC_CONNECTING) { NG_BTSOCKET_RFCOMM_ERR( "%s: Got SABM for dlci=%d in invalid state=%d, flags=%#x\n", __func__, dlci, pcb->state, pcb->flags); mtx_unlock(&pcb->pcb_mtx); return (ENOENT); } ng_btsocket_rfcomm_untimeout(pcb); error = ng_btsocket_rfcomm_send_command(s,RFCOMM_FRAME_UA,dlci); if (error == 0) error = ng_btsocket_rfcomm_send_msc(pcb); if (error == 0) { pcb->state = NG_BTSOCKET_RFCOMM_DLC_CONNECTED; soisconnected(pcb->so); } else ng_btsocket_rfcomm_pcb_kill(pcb, error); mtx_unlock(&pcb->pcb_mtx); return (error); } /* * We do not have requested DLCI, so it must be an incoming connection * with default parameters. Try to accept it. 
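RFCOMM_SRVCHANNEL() used just below, and RFCOMM_MKDLCI() used in ng_btsocket_rfcomm_connect() earlier, are the usual TS 07.10 mapping between a server channel and a DLCI: the low bit is the direction bit, the upper bits are the channel. A sketch of that mapping, assuming the conventional definitions (the kernel macros themselves are not shown in this diff):

#include <assert.h>

/* Assumed shape of the kernel macros. */
#define MKDLCI(d, channel)	(((channel) << 1) | (d))
#define SRVCHANNEL(dlci)	((dlci) >> 1)

int
main(void)
{
	/* Server channel 1 becomes DLCI 2 or 3 depending on the direction bit. */
	assert(MKDLCI(0, 1) == 2 && MKDLCI(1, 1) == 3);

	/* Either way the channel is recovered for connect_ind() above. */
	assert(SRVCHANNEL(2) == 1 && SRVCHANNEL(3) == 1);
	return (0);
}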
*/ pcb = ng_btsocket_rfcomm_connect_ind(s, RFCOMM_SRVCHANNEL(dlci)); if (pcb != NULL) { mtx_lock(&pcb->pcb_mtx); pcb->dlci = dlci; error = ng_btsocket_rfcomm_send_command(s,RFCOMM_FRAME_UA,dlci); if (error == 0) error = ng_btsocket_rfcomm_send_msc(pcb); if (error == 0) { pcb->state = NG_BTSOCKET_RFCOMM_DLC_CONNECTED; soisconnected(pcb->so); } else ng_btsocket_rfcomm_pcb_kill(pcb, error); mtx_unlock(&pcb->pcb_mtx); } else /* Nobody is listen()ing on the requested DLCI */ error = ng_btsocket_rfcomm_send_command(s,RFCOMM_FRAME_DM,dlci); return (error); } /* ng_btsocket_rfcomm_receive_sabm */ /* * Process RFCOMM DISC frame */ static int ng_btsocket_rfcomm_receive_disc(ng_btsocket_rfcomm_session_p s, int dlci) { ng_btsocket_rfcomm_pcb_p pcb = NULL; int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got DISC, session state=%d, flags=%#x, mtu=%d, dlci=%d\n", __func__, s->state, s->flags, s->mtu, dlci); /* DLCI == 0 means close multiplexor channel */ if (dlci == 0) { /* XXX FIXME assume that remote side will close the socket */ error = ng_btsocket_rfcomm_send_command(s, RFCOMM_FRAME_UA, 0); if (error == 0) { if (s->state == NG_BTSOCKET_RFCOMM_SESSION_DISCONNECTING) s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; /* XXX */ else s->state = NG_BTSOCKET_RFCOMM_SESSION_DISCONNECTING; } else s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; /* XXX */ ng_btsocket_rfcomm_session_clean(s); } else { pcb = ng_btsocket_rfcomm_pcb_by_dlci(s, dlci); if (pcb != NULL) { int err; mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_RFCOMM_INFO( "%s: Got DISC for dlci=%d, state=%d, flags=%#x\n", __func__, dlci, pcb->state, pcb->flags); error = ng_btsocket_rfcomm_send_command(s, RFCOMM_FRAME_UA, dlci); if (pcb->state == NG_BTSOCKET_RFCOMM_DLC_CONNECTED) err = 0; else err = ECONNREFUSED; ng_btsocket_rfcomm_pcb_kill(pcb, err); mtx_unlock(&pcb->pcb_mtx); } else { NG_BTSOCKET_RFCOMM_WARN( "%s: Got DISC for non-existing dlci=%d\n", __func__, dlci); error = ng_btsocket_rfcomm_send_command(s, RFCOMM_FRAME_DM, dlci); } } return (error); } /* ng_btsocket_rfcomm_receive_disc */ /* * Process RFCOMM UA frame */ static int ng_btsocket_rfcomm_receive_ua(ng_btsocket_rfcomm_session_p s, int dlci) { ng_btsocket_rfcomm_pcb_p pcb = NULL; int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got UA, session state=%d, flags=%#x, mtu=%d, dlci=%d\n", __func__, s->state, s->flags, s->mtu, dlci); /* dlci == 0 means multiplexor channel */ if (dlci == 0) { switch (s->state) { case NG_BTSOCKET_RFCOMM_SESSION_CONNECTED: s->state = NG_BTSOCKET_RFCOMM_SESSION_OPEN; ng_btsocket_rfcomm_connect_cfm(s); break; case NG_BTSOCKET_RFCOMM_SESSION_DISCONNECTING: s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; ng_btsocket_rfcomm_session_clean(s); break; default: NG_BTSOCKET_RFCOMM_WARN( "%s: Got UA for session in invalid state=%d(%d), flags=%#x, mtu=%d\n", __func__, s->state, INITIATOR(s), s->flags, s->mtu); error = ENOENT; break; } return (error); } /* Check if we have this DLCI */ pcb = ng_btsocket_rfcomm_pcb_by_dlci(s, dlci); if (pcb != NULL) { mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_RFCOMM_INFO( "%s: Got UA for dlci=%d, state=%d, flags=%#x\n", __func__, dlci, pcb->state, pcb->flags); switch (pcb->state) { case NG_BTSOCKET_RFCOMM_DLC_CONNECTING: ng_btsocket_rfcomm_untimeout(pcb); error = ng_btsocket_rfcomm_send_msc(pcb); if (error == 0) { pcb->state = NG_BTSOCKET_RFCOMM_DLC_CONNECTED; soisconnected(pcb->so); } break; case NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING: ng_btsocket_rfcomm_pcb_kill(pcb, 0); break; default: 
NG_BTSOCKET_RFCOMM_WARN( "%s: Got UA for dlci=%d in invalid state=%d, flags=%#x\n", __func__, dlci, pcb->state, pcb->flags); error = ENOENT; break; } mtx_unlock(&pcb->pcb_mtx); } else { NG_BTSOCKET_RFCOMM_WARN( "%s: Got UA for non-existing dlci=%d\n", __func__, dlci); error = ng_btsocket_rfcomm_send_command(s,RFCOMM_FRAME_DM,dlci); } return (error); } /* ng_btsocket_rfcomm_receive_ua */ /* * Process RFCOMM DM frame */ static int ng_btsocket_rfcomm_receive_dm(ng_btsocket_rfcomm_session_p s, int dlci) { ng_btsocket_rfcomm_pcb_p pcb = NULL; int error; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got DM, session state=%d, flags=%#x, mtu=%d, dlci=%d\n", __func__, s->state, s->flags, s->mtu, dlci); /* DLCI == 0 means multiplexor channel */ if (dlci == 0) { /* Disconnect all dlc's on the session */ s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; ng_btsocket_rfcomm_session_clean(s); } else { pcb = ng_btsocket_rfcomm_pcb_by_dlci(s, dlci); if (pcb != NULL) { mtx_lock(&pcb->pcb_mtx); NG_BTSOCKET_RFCOMM_INFO( "%s: Got DM for dlci=%d, state=%d, flags=%#x\n", __func__, dlci, pcb->state, pcb->flags); if (pcb->state == NG_BTSOCKET_RFCOMM_DLC_CONNECTED) error = ECONNRESET; else error = ECONNREFUSED; ng_btsocket_rfcomm_pcb_kill(pcb, error); mtx_unlock(&pcb->pcb_mtx); } else NG_BTSOCKET_RFCOMM_WARN( "%s: Got DM for non-existing dlci=%d\n", __func__, dlci); } return (0); } /* ng_btsocket_rfcomm_receive_dm */ /* * Process RFCOMM UIH frame (data) */ static int ng_btsocket_rfcomm_receive_uih(ng_btsocket_rfcomm_session_p s, int dlci, int pf, struct mbuf *m0) { ng_btsocket_rfcomm_pcb_p pcb = NULL; int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got UIH, session state=%d, flags=%#x, mtu=%d, dlci=%d, pf=%d, len=%d\n", __func__, s->state, s->flags, s->mtu, dlci, pf, m0->m_pkthdr.len); /* XXX should we do it here? Check for session flow control */ if (s->flags & NG_BTSOCKET_RFCOMM_SESSION_LFC) { NG_BTSOCKET_RFCOMM_WARN( "%s: Got UIH with session flow control asserted, state=%d, flags=%#x\n", __func__, s->state, s->flags); goto drop; } /* Check if we have this dlci */ pcb = ng_btsocket_rfcomm_pcb_by_dlci(s, dlci); if (pcb == NULL) { NG_BTSOCKET_RFCOMM_WARN( "%s: Got UIH for non-existing dlci=%d\n", __func__, dlci); error = ng_btsocket_rfcomm_send_command(s,RFCOMM_FRAME_DM,dlci); goto drop; } mtx_lock(&pcb->pcb_mtx); /* Check dlci state */ if (pcb->state != NG_BTSOCKET_RFCOMM_DLC_CONNECTED) { NG_BTSOCKET_RFCOMM_WARN( "%s: Got UIH for dlci=%d in invalid state=%d, flags=%#x\n", __func__, dlci, pcb->state, pcb->flags); error = EINVAL; goto drop1; } /* Check dlci flow control */ if (((pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC) && pcb->rx_cred <= 0) || (pcb->lmodem & RFCOMM_MODEM_FC)) { NG_BTSOCKET_RFCOMM_ERR( "%s: Got UIH for dlci=%d with asserted flow control, state=%d, " \ "flags=%#x, rx_cred=%d, lmodem=%#x\n", __func__, dlci, pcb->state, pcb->flags, pcb->rx_cred, pcb->lmodem); goto drop1; } /* Did we get any credits? */ if ((pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC) && pf) { NG_BTSOCKET_RFCOMM_INFO( "%s: Got %d more credits for dlci=%d, state=%d, flags=%#x, " \ "rx_cred=%d, tx_cred=%d\n", __func__, *mtod(m0, u_int8_t *), dlci, pcb->state, pcb->flags, pcb->rx_cred, pcb->tx_cred); pcb->tx_cred += *mtod(m0, u_int8_t *); m_adj(m0, 1); /* Send more from the DLC. XXX check for errors? 
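* Note that ng_btsocket_rfcomm_pcb_send() clamps the amount it sends to the * available tx credits when credit flow control is in use, so passing ALOT * here is safe.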
*/ ng_btsocket_rfcomm_pcb_send(pcb, ALOT); } /* OK, the rest of the mbuf is the data */ if (m0->m_pkthdr.len > 0) { /* If we are using credit flow control decrease rx_cred here */ if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC) { /* Give remote peer more credits (if needed) */ if (-- pcb->rx_cred <= RFCOMM_MAX_CREDITS / 2) ng_btsocket_rfcomm_send_credits(pcb); else NG_BTSOCKET_RFCOMM_INFO( "%s: Remote side still has credits, dlci=%d, state=%d, flags=%#x, " \ "rx_cred=%d, tx_cred=%d\n", __func__, dlci, pcb->state, pcb->flags, pcb->rx_cred, pcb->tx_cred); } /* Check packet against mtu on dlci */ if (m0->m_pkthdr.len > pcb->mtu) { NG_BTSOCKET_RFCOMM_ERR( "%s: Got oversized UIH for dlci=%d, state=%d, flags=%#x, mtu=%d, len=%d\n", __func__, dlci, pcb->state, pcb->flags, pcb->mtu, m0->m_pkthdr.len); error = EMSGSIZE; } else if (m0->m_pkthdr.len > sbspace(&pcb->so->so_rcv)) { /* * This is really bad. Receive queue on socket does * not have enough space for the packet. We do not * have any other choice but drop the packet. */ NG_BTSOCKET_RFCOMM_ERR( "%s: Not enough space in socket receive queue. Dropping UIH for dlci=%d, " \ "state=%d, flags=%#x, len=%d, space=%ld\n", __func__, dlci, pcb->state, pcb->flags, m0->m_pkthdr.len, sbspace(&pcb->so->so_rcv)); error = ENOBUFS; } else { /* Append packet to the socket receive queue */ sbappend(&pcb->so->so_rcv, m0, 0); m0 = NULL; sorwakeup(pcb->so); } } drop1: mtx_unlock(&pcb->pcb_mtx); drop: NG_FREE_M(m0); /* checks for != NULL */ return (error); } /* ng_btsocket_rfcomm_receive_uih */ /* * Process RFCOMM MCC command (Multiplexor) * * From TS 07.10 spec * * "5.4.3.1 Information Data * * ...The frames (UIH) sent by the initiating station have the C/R bit set * to 1 and those sent by the responding station have the C/R bit set to 0..." * * "5.4.6.2 Operating procedures * * Messages always exist in pairs; a command message and a corresponding * response message. If the C/R bit is set to 1 the message is a command, * if it is set to 0 the message is a response... * * ... * * NOTE: Notice that when UIH frames are used to convey information on DLCI 0 * there are at least two different fields that contain a C/R bit, and the * bits are set of different form. The C/R bit in the Type field shall be set * as it is stated above, while the C/R bit in the Address field (see subclause * 5.2.1.2) shall be set as it is described in subclause 5.4.3.1." */ static int ng_btsocket_rfcomm_receive_mcc(ng_btsocket_rfcomm_session_p s, struct mbuf *m0) { struct rfcomm_mcc_hdr *hdr = NULL; u_int8_t cr, type, length; mtx_assert(&s->session_mtx, MA_OWNED); /* * We can access data directly in the first mbuf, because we have * m_pullup()'ed mbuf chain in ng_btsocket_rfcomm_receive_frame(). * All MCC commands should fit into single mbuf (except probably TEST). 
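* Unknown MCC command types are answered with an NSC (non-supported command) * response, reusing the received mbuf (see the default case below).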
*/ hdr = mtod(m0, struct rfcomm_mcc_hdr *); cr = RFCOMM_CR(hdr->type); type = RFCOMM_MCC_TYPE(hdr->type); length = RFCOMM_MCC_LENGTH(hdr->length); /* Check MCC frame length */ if (sizeof(*hdr) + length != m0->m_pkthdr.len) { NG_BTSOCKET_RFCOMM_ERR( "%s: Invalid MCC frame length=%d, len=%d\n", __func__, length, m0->m_pkthdr.len); NG_FREE_M(m0); return (EMSGSIZE); } switch (type) { case RFCOMM_MCC_TEST: return (ng_btsocket_rfcomm_receive_test(s, m0)); /* NOT REACHED */ case RFCOMM_MCC_FCON: case RFCOMM_MCC_FCOFF: return (ng_btsocket_rfcomm_receive_fc(s, m0)); /* NOT REACHED */ case RFCOMM_MCC_MSC: return (ng_btsocket_rfcomm_receive_msc(s, m0)); /* NOT REACHED */ case RFCOMM_MCC_RPN: return (ng_btsocket_rfcomm_receive_rpn(s, m0)); /* NOT REACHED */ case RFCOMM_MCC_RLS: return (ng_btsocket_rfcomm_receive_rls(s, m0)); /* NOT REACHED */ case RFCOMM_MCC_PN: return (ng_btsocket_rfcomm_receive_pn(s, m0)); /* NOT REACHED */ case RFCOMM_MCC_NSC: NG_BTSOCKET_RFCOMM_ERR( "%s: Got MCC NSC, type=%#x, cr=%d, length=%d, session state=%d, flags=%#x, " \ "mtu=%d, len=%d\n", __func__, RFCOMM_MCC_TYPE(*((u_int8_t *)(hdr + 1))), cr, length, s->state, s->flags, s->mtu, m0->m_pkthdr.len); NG_FREE_M(m0); break; default: NG_BTSOCKET_RFCOMM_ERR( "%s: Got unknown MCC, type=%#x, cr=%d, length=%d, session state=%d, " \ "flags=%#x, mtu=%d, len=%d\n", __func__, type, cr, length, s->state, s->flags, s->mtu, m0->m_pkthdr.len); /* Reuse mbuf to send NSC */ hdr = mtod(m0, struct rfcomm_mcc_hdr *); m0->m_pkthdr.len = m0->m_len = sizeof(*hdr); /* Create MCC NSC header */ hdr->type = RFCOMM_MKMCC_TYPE(0, RFCOMM_MCC_NSC); hdr->length = RFCOMM_MKLEN8(1); /* Put back MCC command type we did not like */ m0->m_data[m0->m_len] = RFCOMM_MKMCC_TYPE(cr, type); m0->m_pkthdr.len ++; m0->m_len ++; /* Send UIH frame */ return (ng_btsocket_rfcomm_send_uih(s, RFCOMM_MKADDRESS(INITIATOR(s), 0), 0, 0, m0)); /* NOT REACHED */ } return (0); } /* ng_btsocket_rfcomm_receive_mcc */ /* * Receive RFCOMM TEST MCC command */ static int ng_btsocket_rfcomm_receive_test(ng_btsocket_rfcomm_session_p s, struct mbuf *m0) { struct rfcomm_mcc_hdr *hdr = mtod(m0, struct rfcomm_mcc_hdr *); int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got MCC TEST, cr=%d, length=%d, session state=%d, flags=%#x, mtu=%d, " \ "len=%d\n", __func__, RFCOMM_CR(hdr->type), RFCOMM_MCC_LENGTH(hdr->length), s->state, s->flags, s->mtu, m0->m_pkthdr.len); if (RFCOMM_CR(hdr->type)) { hdr->type = RFCOMM_MKMCC_TYPE(0, RFCOMM_MCC_TEST); error = ng_btsocket_rfcomm_send_uih(s, RFCOMM_MKADDRESS(INITIATOR(s), 0), 0, 0, m0); } else NG_FREE_M(m0); /* XXX ignore response */ return (error); } /* ng_btsocket_rfcomm_receive_test */ /* * Receive RFCOMM FCON/FCOFF MCC command */ static int ng_btsocket_rfcomm_receive_fc(ng_btsocket_rfcomm_session_p s, struct mbuf *m0) { struct rfcomm_mcc_hdr *hdr = mtod(m0, struct rfcomm_mcc_hdr *); u_int8_t type = RFCOMM_MCC_TYPE(hdr->type); int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); /* * Turn ON/OFF aggregate flow on the entire session. When remote peer * asserted flow control no transmission shall occur except on dlci 0 * (control channel). */ NG_BTSOCKET_RFCOMM_INFO( "%s: Got MCC FC%s, cr=%d, length=%d, session state=%d, flags=%#x, mtu=%d, " \ "len=%d\n", __func__, (type == RFCOMM_MCC_FCON)? 
"ON" : "OFF", RFCOMM_CR(hdr->type), RFCOMM_MCC_LENGTH(hdr->length), s->state, s->flags, s->mtu, m0->m_pkthdr.len); if (RFCOMM_CR(hdr->type)) { if (type == RFCOMM_MCC_FCON) s->flags &= ~NG_BTSOCKET_RFCOMM_SESSION_RFC; else s->flags |= NG_BTSOCKET_RFCOMM_SESSION_RFC; hdr->type = RFCOMM_MKMCC_TYPE(0, type); error = ng_btsocket_rfcomm_send_uih(s, RFCOMM_MKADDRESS(INITIATOR(s), 0), 0, 0, m0); } else NG_FREE_M(m0); /* XXX ignore response */ return (error); } /* ng_btsocket_rfcomm_receive_fc */ /* * Receive RFCOMM MSC MCC command */ static int ng_btsocket_rfcomm_receive_msc(ng_btsocket_rfcomm_session_p s, struct mbuf *m0) { struct rfcomm_mcc_hdr *hdr = mtod(m0, struct rfcomm_mcc_hdr*); struct rfcomm_mcc_msc *msc = (struct rfcomm_mcc_msc *)(hdr+1); ng_btsocket_rfcomm_pcb_t *pcb = NULL; int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got MCC MSC, dlci=%d, cr=%d, length=%d, session state=%d, flags=%#x, " \ "mtu=%d, len=%d\n", __func__, RFCOMM_DLCI(msc->address), RFCOMM_CR(hdr->type), RFCOMM_MCC_LENGTH(hdr->length), s->state, s->flags, s->mtu, m0->m_pkthdr.len); if (RFCOMM_CR(hdr->type)) { pcb = ng_btsocket_rfcomm_pcb_by_dlci(s, RFCOMM_DLCI(msc->address)); if (pcb == NULL) { NG_BTSOCKET_RFCOMM_WARN( "%s: Got MSC command for non-existing dlci=%d\n", __func__, RFCOMM_DLCI(msc->address)); NG_FREE_M(m0); return (ENOENT); } mtx_lock(&pcb->pcb_mtx); if (pcb->state != NG_BTSOCKET_RFCOMM_DLC_CONNECTING && pcb->state != NG_BTSOCKET_RFCOMM_DLC_CONNECTED) { NG_BTSOCKET_RFCOMM_WARN( "%s: Got MSC on dlci=%d in invalid state=%d\n", __func__, RFCOMM_DLCI(msc->address), pcb->state); mtx_unlock(&pcb->pcb_mtx); NG_FREE_M(m0); return (EINVAL); } pcb->rmodem = msc->modem; /* Update remote port signals */ hdr->type = RFCOMM_MKMCC_TYPE(0, RFCOMM_MCC_MSC); error = ng_btsocket_rfcomm_send_uih(s, RFCOMM_MKADDRESS(INITIATOR(s), 0), 0, 0, m0); #if 0 /* YYY */ /* Send more data from DLC. XXX check for errors? */ if (!(pcb->rmodem & RFCOMM_MODEM_FC) && !(pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC)) ng_btsocket_rfcomm_pcb_send(pcb, ALOT); #endif /* YYY */ mtx_unlock(&pcb->pcb_mtx); } else NG_FREE_M(m0); /* XXX ignore response */ return (error); } /* ng_btsocket_rfcomm_receive_msc */ /* * Receive RFCOMM RPN MCC command * XXX FIXME do we need htole16/le16toh for RPN param_mask? */ static int ng_btsocket_rfcomm_receive_rpn(ng_btsocket_rfcomm_session_p s, struct mbuf *m0) { struct rfcomm_mcc_hdr *hdr = mtod(m0, struct rfcomm_mcc_hdr *); struct rfcomm_mcc_rpn *rpn = (struct rfcomm_mcc_rpn *)(hdr + 1); int error = 0; u_int16_t param_mask; u_int8_t bit_rate, data_bits, stop_bits, parity, flow_control, xon_char, xoff_char; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got MCC RPN, dlci=%d, cr=%d, length=%d, session state=%d, flags=%#x, " \ "mtu=%d, len=%d\n", __func__, RFCOMM_DLCI(rpn->dlci), RFCOMM_CR(hdr->type), RFCOMM_MCC_LENGTH(hdr->length), s->state, s->flags, s->mtu, m0->m_pkthdr.len); if (RFCOMM_CR(hdr->type)) { param_mask = RFCOMM_RPN_PM_ALL; if (RFCOMM_MCC_LENGTH(hdr->length) == 1) { /* Request - return default setting */ bit_rate = RFCOMM_RPN_BR_115200; data_bits = RFCOMM_RPN_DATA_8; stop_bits = RFCOMM_RPN_STOP_1; parity = RFCOMM_RPN_PARITY_NONE; flow_control = RFCOMM_RPN_FLOW_NONE; xon_char = RFCOMM_RPN_XON_CHAR; xoff_char = RFCOMM_RPN_XOFF_CHAR; } else { /* * Ignore/accept bit_rate, 8 bits, 1 stop bit, no * parity, no flow control lines, default XON/XOFF * chars. 
*/ bit_rate = rpn->bit_rate; rpn->param_mask = le16toh(rpn->param_mask); /* XXX */ data_bits = RFCOMM_RPN_DATA_BITS(rpn->line_settings); if (rpn->param_mask & RFCOMM_RPN_PM_DATA && data_bits != RFCOMM_RPN_DATA_8) { data_bits = RFCOMM_RPN_DATA_8; param_mask ^= RFCOMM_RPN_PM_DATA; } stop_bits = RFCOMM_RPN_STOP_BITS(rpn->line_settings); if (rpn->param_mask & RFCOMM_RPN_PM_STOP && stop_bits != RFCOMM_RPN_STOP_1) { stop_bits = RFCOMM_RPN_STOP_1; param_mask ^= RFCOMM_RPN_PM_STOP; } parity = RFCOMM_RPN_PARITY(rpn->line_settings); if (rpn->param_mask & RFCOMM_RPN_PM_PARITY && parity != RFCOMM_RPN_PARITY_NONE) { parity = RFCOMM_RPN_PARITY_NONE; param_mask ^= RFCOMM_RPN_PM_PARITY; } flow_control = rpn->flow_control; if (rpn->param_mask & RFCOMM_RPN_PM_FLOW && flow_control != RFCOMM_RPN_FLOW_NONE) { flow_control = RFCOMM_RPN_FLOW_NONE; param_mask ^= RFCOMM_RPN_PM_FLOW; } xon_char = rpn->xon_char; if (rpn->param_mask & RFCOMM_RPN_PM_XON && xon_char != RFCOMM_RPN_XON_CHAR) { xon_char = RFCOMM_RPN_XON_CHAR; param_mask ^= RFCOMM_RPN_PM_XON; } xoff_char = rpn->xoff_char; if (rpn->param_mask & RFCOMM_RPN_PM_XOFF && xoff_char != RFCOMM_RPN_XOFF_CHAR) { xoff_char = RFCOMM_RPN_XOFF_CHAR; param_mask ^= RFCOMM_RPN_PM_XOFF; } } rpn->bit_rate = bit_rate; rpn->line_settings = RFCOMM_MKRPN_LINE_SETTINGS(data_bits, stop_bits, parity); rpn->flow_control = flow_control; rpn->xon_char = xon_char; rpn->xoff_char = xoff_char; rpn->param_mask = htole16(param_mask); /* XXX */ m0->m_pkthdr.len = m0->m_len = sizeof(*hdr) + sizeof(*rpn); hdr->type = RFCOMM_MKMCC_TYPE(0, RFCOMM_MCC_RPN); error = ng_btsocket_rfcomm_send_uih(s, RFCOMM_MKADDRESS(INITIATOR(s), 0), 0, 0, m0); } else NG_FREE_M(m0); /* XXX ignore response */ return (error); } /* ng_btsocket_rfcomm_receive_rpn */ /* * Receive RFCOMM RLS MCC command */ static int ng_btsocket_rfcomm_receive_rls(ng_btsocket_rfcomm_session_p s, struct mbuf *m0) { struct rfcomm_mcc_hdr *hdr = mtod(m0, struct rfcomm_mcc_hdr *); struct rfcomm_mcc_rls *rls = (struct rfcomm_mcc_rls *)(hdr + 1); int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); /* * XXX FIXME Do we have to do anything else here? Remote peer tries to * tell us something about DLCI. Just report what we have received and * return back received values as required by TS 07.10 spec. 
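* (RLS reports the remote V.24 line status, e.g. overrun/parity/framing * errors, which has no socket-level equivalent here, so logging it is all * we do.)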
*/ NG_BTSOCKET_RFCOMM_INFO( "%s: Got MCC RLS, dlci=%d, status=%#x, cr=%d, length=%d, session state=%d, " \ "flags=%#x, mtu=%d, len=%d\n", __func__, RFCOMM_DLCI(rls->address), rls->status, RFCOMM_CR(hdr->type), RFCOMM_MCC_LENGTH(hdr->length), s->state, s->flags, s->mtu, m0->m_pkthdr.len); if (RFCOMM_CR(hdr->type)) { if (rls->status & 0x1) NG_BTSOCKET_RFCOMM_ERR( "%s: Got RLS dlci=%d, error=%#x\n", __func__, RFCOMM_DLCI(rls->address), rls->status >> 1); hdr->type = RFCOMM_MKMCC_TYPE(0, RFCOMM_MCC_RLS); error = ng_btsocket_rfcomm_send_uih(s, RFCOMM_MKADDRESS(INITIATOR(s), 0), 0, 0, m0); } else NG_FREE_M(m0); /* XXX ignore responses */ return (error); } /* ng_btsocket_rfcomm_receive_rls */ /* * Receive RFCOMM PN MCC command */ static int ng_btsocket_rfcomm_receive_pn(ng_btsocket_rfcomm_session_p s, struct mbuf *m0) { struct rfcomm_mcc_hdr *hdr = mtod(m0, struct rfcomm_mcc_hdr*); struct rfcomm_mcc_pn *pn = (struct rfcomm_mcc_pn *)(hdr+1); ng_btsocket_rfcomm_pcb_t *pcb = NULL; int error = 0; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Got MCC PN, dlci=%d, cr=%d, length=%d, flow_control=%#x, priority=%d, " \ "ack_timer=%d, mtu=%d, max_retrans=%d, credits=%d, session state=%d, " \ "flags=%#x, session mtu=%d, len=%d\n", __func__, pn->dlci, RFCOMM_CR(hdr->type), RFCOMM_MCC_LENGTH(hdr->length), pn->flow_control, pn->priority, pn->ack_timer, le16toh(pn->mtu), pn->max_retrans, pn->credits, s->state, s->flags, s->mtu, m0->m_pkthdr.len); if (pn->dlci == 0) { NG_BTSOCKET_RFCOMM_ERR("%s: Zero dlci in MCC PN\n", __func__); NG_FREE_M(m0); return (EINVAL); } /* Check if we have this dlci */ pcb = ng_btsocket_rfcomm_pcb_by_dlci(s, pn->dlci); if (pcb != NULL) { mtx_lock(&pcb->pcb_mtx); if (RFCOMM_CR(hdr->type)) { /* PN Request */ ng_btsocket_rfcomm_set_pn(pcb, 1, pn->flow_control, pn->credits, pn->mtu); if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC) { pn->flow_control = 0xe0; pn->credits = RFCOMM_DEFAULT_CREDITS; } else { pn->flow_control = 0; pn->credits = 0; } hdr->type = RFCOMM_MKMCC_TYPE(0, RFCOMM_MCC_PN); error = ng_btsocket_rfcomm_send_uih(s, RFCOMM_MKADDRESS(INITIATOR(s), 0), 0, 0, m0); } else { /* PN Response - proceed with SABM. 
Timeout still set */ if (pcb->state == NG_BTSOCKET_RFCOMM_DLC_CONFIGURING) { ng_btsocket_rfcomm_set_pn(pcb, 0, pn->flow_control, pn->credits, pn->mtu); pcb->state = NG_BTSOCKET_RFCOMM_DLC_CONNECTING; error = ng_btsocket_rfcomm_send_command(s, RFCOMM_FRAME_SABM, pn->dlci); } else NG_BTSOCKET_RFCOMM_WARN( "%s: Got PN response for dlci=%d in invalid state=%d\n", __func__, pn->dlci, pcb->state); NG_FREE_M(m0); } mtx_unlock(&pcb->pcb_mtx); } else if (RFCOMM_CR(hdr->type)) { /* PN request to non-existing dlci - incoming connection */ pcb = ng_btsocket_rfcomm_connect_ind(s, RFCOMM_SRVCHANNEL(pn->dlci)); if (pcb != NULL) { mtx_lock(&pcb->pcb_mtx); pcb->dlci = pn->dlci; ng_btsocket_rfcomm_set_pn(pcb, 1, pn->flow_control, pn->credits, pn->mtu); if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC) { pn->flow_control = 0xe0; pn->credits = RFCOMM_DEFAULT_CREDITS; } else { pn->flow_control = 0; pn->credits = 0; } hdr->type = RFCOMM_MKMCC_TYPE(0, RFCOMM_MCC_PN); error = ng_btsocket_rfcomm_send_uih(s, RFCOMM_MKADDRESS(INITIATOR(s), 0), 0, 0, m0); if (error == 0) { ng_btsocket_rfcomm_timeout(pcb); pcb->state = NG_BTSOCKET_RFCOMM_DLC_CONNECTING; soisconnecting(pcb->so); } else ng_btsocket_rfcomm_pcb_kill(pcb, error); mtx_unlock(&pcb->pcb_mtx); } else { /* Nobody is listen()ing on this channel */ error = ng_btsocket_rfcomm_send_command(s, RFCOMM_FRAME_DM, pn->dlci); NG_FREE_M(m0); } } else NG_FREE_M(m0); /* XXX ignore response to non-existing dlci */ return (error); } /* ng_btsocket_rfcomm_receive_pn */ /* * Set PN parameters for dlci. Caller must hold pcb->pcb_mtx. * * From Bluetooth spec. * * "... The CL1 - CL4 field is completely redefined. (In TS07.10 this defines * the convergence layer to use, which is not applicable to RFCOMM. In RFCOMM, * in Bluetooth versions up to 1.0B, this field was forced to 0). * * In the PN request sent prior to a DLC establishment, this field must contain * the value 15 (0xF), indicating support of credit based flow control in the * sender. See Table 5.3 below. If the PN response contains any other value * than 14 (0xE) in this field, it is inferred that the peer RFCOMM entity is * not supporting the credit based flow control feature. (This is only possible * if the peer RFCOMM implementation is only conforming to Bluetooth version * 1.0B.) If a PN request is sent on an already open DLC, then this field must * contain the value zero; it is not possible to set initial credits more * than once per DLC activation. A responding implementation must set this * field in the PN response to 14 (0xE), if (and only if) the value in the PN * request was 15..." */ static void ng_btsocket_rfcomm_set_pn(ng_btsocket_rfcomm_pcb_p pcb, u_int8_t cr, u_int8_t flow_control, u_int8_t credits, u_int16_t mtu) { mtx_assert(&pcb->pcb_mtx, MA_OWNED); pcb->mtu = le16toh(mtu); if (cr) { if (flow_control == 0xf0) { pcb->flags |= NG_BTSOCKET_RFCOMM_DLC_CFC; pcb->tx_cred = credits; } else { pcb->flags &= ~NG_BTSOCKET_RFCOMM_DLC_CFC; pcb->tx_cred = 0; } } else { if (flow_control == 0xe0) { pcb->flags |= NG_BTSOCKET_RFCOMM_DLC_CFC; pcb->tx_cred = credits; } else { pcb->flags &= ~NG_BTSOCKET_RFCOMM_DLC_CFC; pcb->tx_cred = 0; } } NG_BTSOCKET_RFCOMM_INFO( "%s: cr=%d, dlci=%d, state=%d, flags=%#x, mtu=%d, rx_cred=%d, tx_cred=%d\n", __func__, cr, pcb->dlci, pcb->state, pcb->flags, pcb->mtu, pcb->rx_cred, pcb->tx_cred); } /* ng_btsocket_rfcomm_set_pn */ /* * Send RFCOMM SABM/DISC/UA/DM frames. 
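* These frames carry no information payload (the length field is zero); the * C/R bit in the address reflects whether we send a command (SABM/DISC) or a * response (UA/DM) for our initiator role.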
Caller must hold s->session_mtx */ static int ng_btsocket_rfcomm_send_command(ng_btsocket_rfcomm_session_p s, u_int8_t type, u_int8_t dlci) { struct rfcomm_cmd_hdr *hdr = NULL; struct mbuf *m = NULL; int cr; mtx_assert(&s->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Sending command type %#x, session state=%d, flags=%#x, mtu=%d, dlci=%d\n", __func__, type, s->state, s->flags, s->mtu, dlci); switch (type) { case RFCOMM_FRAME_SABM: case RFCOMM_FRAME_DISC: cr = INITIATOR(s); break; case RFCOMM_FRAME_UA: case RFCOMM_FRAME_DM: cr = !INITIATOR(s); break; default: panic("%s: Invalid frame type=%#x\n", __func__, type); return (EINVAL); /* NOT REACHED */ } MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = sizeof(*hdr); hdr = mtod(m, struct rfcomm_cmd_hdr *); hdr->address = RFCOMM_MKADDRESS(cr, dlci); hdr->control = RFCOMM_MKCONTROL(type, 1); hdr->length = RFCOMM_MKLEN8(0); hdr->fcs = ng_btsocket_rfcomm_fcs3((u_int8_t *) hdr); NG_BT_MBUFQ_ENQUEUE(&s->outq, m); return (0); } /* ng_btsocket_rfcomm_send_command */ /* * Send RFCOMM UIH frame. Caller must hold s->session_mtx */ static int ng_btsocket_rfcomm_send_uih(ng_btsocket_rfcomm_session_p s, u_int8_t address, u_int8_t pf, u_int8_t credits, struct mbuf *data) { struct rfcomm_frame_hdr *hdr = NULL; struct mbuf *m = NULL, *mcrc = NULL; u_int16_t length; mtx_assert(&s->session_mtx, MA_OWNED); MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { NG_FREE_M(data); return (ENOBUFS); } m->m_pkthdr.len = m->m_len = sizeof(*hdr); MGET(mcrc, M_NOWAIT, MT_DATA); if (mcrc == NULL) { NG_FREE_M(data); return (ENOBUFS); } mcrc->m_len = 1; /* Fill UIH frame header */ hdr = mtod(m, struct rfcomm_frame_hdr *); hdr->address = address; hdr->control = RFCOMM_MKCONTROL(RFCOMM_FRAME_UIH, pf); /* Calculate FCS */ mcrc->m_data[0] = ng_btsocket_rfcomm_fcs2((u_int8_t *) hdr); /* Put length back */ length = (data != NULL)? data->m_pkthdr.len : 0; if (length > 127) { u_int16_t l = htole16(RFCOMM_MKLEN16(length)); bcopy(&l, &hdr->length, sizeof(l)); m->m_pkthdr.len ++; m->m_len ++; } else hdr->length = RFCOMM_MKLEN8(length); if (pf) { m->m_data[m->m_len] = credits; m->m_pkthdr.len ++; m->m_len ++; } /* Add payload */ if (data != NULL) { m_cat(m, data); m->m_pkthdr.len += length; } /* Put FCS back */ m_cat(m, mcrc); m->m_pkthdr.len ++; NG_BTSOCKET_RFCOMM_INFO( "%s: Sending UIH state=%d, flags=%#x, address=%d, length=%d, pf=%d, " \ "credits=%d, len=%d\n", __func__, s->state, s->flags, address, length, pf, credits, m->m_pkthdr.len); NG_BT_MBUFQ_ENQUEUE(&s->outq, m); return (0); } /* ng_btsocket_rfcomm_send_uih */ /* * Send MSC request. 
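* MSC advertises our local V.24 modem signals (pcb->lmodem) for this DLC and, * like all multiplexor commands, is carried in a UIH frame on DLCI 0.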
Caller must hold pcb->pcb_mtx and pcb->session->session_mtx */ static int ng_btsocket_rfcomm_send_msc(ng_btsocket_rfcomm_pcb_p pcb) { struct mbuf *m = NULL; struct rfcomm_mcc_hdr *hdr = NULL; struct rfcomm_mcc_msc *msc = NULL; mtx_assert(&pcb->session->session_mtx, MA_OWNED); mtx_assert(&pcb->pcb_mtx, MA_OWNED); MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = sizeof(*hdr) + sizeof(*msc); hdr = mtod(m, struct rfcomm_mcc_hdr *); msc = (struct rfcomm_mcc_msc *)(hdr + 1); hdr->type = RFCOMM_MKMCC_TYPE(1, RFCOMM_MCC_MSC); hdr->length = RFCOMM_MKLEN8(sizeof(*msc)); msc->address = RFCOMM_MKADDRESS(1, pcb->dlci); msc->modem = pcb->lmodem; NG_BTSOCKET_RFCOMM_INFO( "%s: Sending MSC dlci=%d, state=%d, flags=%#x, address=%d, modem=%#x\n", __func__, pcb->dlci, pcb->state, pcb->flags, msc->address, msc->modem); return (ng_btsocket_rfcomm_send_uih(pcb->session, RFCOMM_MKADDRESS(INITIATOR(pcb->session), 0), 0, 0, m)); } /* ng_btsocket_rfcomm_send_msc */ /* * Send PN request. Caller must hold pcb->pcb_mtx and pcb->session->session_mtx */ static int ng_btsocket_rfcomm_send_pn(ng_btsocket_rfcomm_pcb_p pcb) { struct mbuf *m = NULL; struct rfcomm_mcc_hdr *hdr = NULL; struct rfcomm_mcc_pn *pn = NULL; mtx_assert(&pcb->session->session_mtx, MA_OWNED); mtx_assert(&pcb->pcb_mtx, MA_OWNED); MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = sizeof(*hdr) + sizeof(*pn); hdr = mtod(m, struct rfcomm_mcc_hdr *); pn = (struct rfcomm_mcc_pn *)(hdr + 1); hdr->type = RFCOMM_MKMCC_TYPE(1, RFCOMM_MCC_PN); hdr->length = RFCOMM_MKLEN8(sizeof(*pn)); pn->dlci = pcb->dlci; /* * Set default DLCI priority as described in GSM 07.10 * (ETSI TS 101 369) clause 5.6 page 42 */ pn->priority = (pcb->dlci < 56)? (((pcb->dlci >> 3) << 3) + 7) : 61; pn->ack_timer = 0; pn->mtu = htole16(pcb->mtu); pn->max_retrans = 0; if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC) { pn->flow_control = 0xf0; pn->credits = pcb->rx_cred; } else { pn->flow_control = 0; pn->credits = 0; } NG_BTSOCKET_RFCOMM_INFO( "%s: Sending PN dlci=%d, state=%d, flags=%#x, mtu=%d, flow_control=%#x, " \ "credits=%d\n", __func__, pcb->dlci, pcb->state, pcb->flags, pcb->mtu, pn->flow_control, pn->credits); return (ng_btsocket_rfcomm_send_uih(pcb->session, RFCOMM_MKADDRESS(INITIATOR(pcb->session), 0), 0, 0, m)); } /* ng_btsocket_rfcomm_send_pn */ /* * Calculate and send credits based on available space in receive buffer */ static int ng_btsocket_rfcomm_send_credits(ng_btsocket_rfcomm_pcb_p pcb) { int error = 0; u_int8_t credits; mtx_assert(&pcb->pcb_mtx, MA_OWNED); mtx_assert(&pcb->session->session_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Sending more credits, dlci=%d, state=%d, flags=%#x, mtu=%d, " \ "space=%ld, tx_cred=%d, rx_cred=%d\n", __func__, pcb->dlci, pcb->state, pcb->flags, pcb->mtu, sbspace(&pcb->so->so_rcv), pcb->tx_cred, pcb->rx_cred); credits = sbspace(&pcb->so->so_rcv) / pcb->mtu; if (credits > 0) { if (pcb->rx_cred + credits > RFCOMM_MAX_CREDITS) credits = RFCOMM_MAX_CREDITS - pcb->rx_cred; error = ng_btsocket_rfcomm_send_uih( pcb->session, RFCOMM_MKADDRESS(INITIATOR(pcb->session), pcb->dlci), 1, credits, NULL); if (error == 0) { pcb->rx_cred += credits; NG_BTSOCKET_RFCOMM_INFO( "%s: Gave remote side %d more credits, dlci=%d, state=%d, flags=%#x, " \ "rx_cred=%d, tx_cred=%d\n", __func__, credits, pcb->dlci, pcb->state, pcb->flags, pcb->rx_cred, pcb->tx_cred); } else NG_BTSOCKET_RFCOMM_ERR( "%s: Could not send credits, error=%d, dlci=%d, state=%d, flags=%#x, " \ "mtu=%d, space=%ld, 
tx_cred=%d, rx_cred=%d\n", __func__, error, pcb->dlci, pcb->state, pcb->flags, pcb->mtu, sbspace(&pcb->so->so_rcv), pcb->tx_cred, pcb->rx_cred); } return (error); } /* ng_btsocket_rfcomm_send_credits */ /***************************************************************************** ***************************************************************************** ** RFCOMM DLCs ***************************************************************************** *****************************************************************************/ /* * Send data from socket send buffer * Caller must hold pcb->pcb_mtx and pcb->session->session_mtx */ static int ng_btsocket_rfcomm_pcb_send(ng_btsocket_rfcomm_pcb_p pcb, int limit) { struct mbuf *m = NULL; int sent, length, error; mtx_assert(&pcb->session->session_mtx, MA_OWNED); mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC) limit = min(limit, pcb->tx_cred); else if (!(pcb->rmodem & RFCOMM_MODEM_FC)) limit = min(limit, RFCOMM_MAX_CREDITS); /* XXX ??? */ else limit = 0; if (limit == 0) { NG_BTSOCKET_RFCOMM_INFO( "%s: Could not send - remote flow control asserted, dlci=%d, flags=%#x, " \ "rmodem=%#x, tx_cred=%d\n", __func__, pcb->dlci, pcb->flags, pcb->rmodem, pcb->tx_cred); return (0); } for (error = 0, sent = 0; sent < limit; sent ++) { length = min(pcb->mtu, sbavail(&pcb->so->so_snd)); if (length == 0) break; /* Get the chunk from the socket's send buffer */ m = ng_btsocket_rfcomm_prepare_packet(&pcb->so->so_snd, length); if (m == NULL) { error = ENOBUFS; break; } sbdrop(&pcb->so->so_snd, length); error = ng_btsocket_rfcomm_send_uih(pcb->session, RFCOMM_MKADDRESS(INITIATOR(pcb->session), pcb->dlci), 0, 0, m); if (error != 0) break; } if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_CFC) pcb->tx_cred -= sent; if (error == 0 && sent > 0) { pcb->flags &= ~NG_BTSOCKET_RFCOMM_DLC_SENDING; sowwakeup(pcb->so); } return (error); } /* ng_btsocket_rfcomm_pcb_send */ /* * Unlink and disconnect DLC. If ng_btsocket_rfcomm_pcb_kill() returns * non zero value than socket has no reference and has to be detached. * Caller must hold pcb->pcb_mtx and pcb->session->session_mtx */ static void ng_btsocket_rfcomm_pcb_kill(ng_btsocket_rfcomm_pcb_p pcb, int error) { ng_btsocket_rfcomm_session_p s = pcb->session; NG_BTSOCKET_RFCOMM_INFO( "%s: Killing DLC, so=%p, dlci=%d, state=%d, flags=%#x, error=%d\n", __func__, pcb->so, pcb->dlci, pcb->state, pcb->flags, error); if (pcb->session == NULL) panic("%s: DLC without session, pcb=%p, state=%d, flags=%#x\n", __func__, pcb, pcb->state, pcb->flags); mtx_assert(&pcb->session->session_mtx, MA_OWNED); mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMO) ng_btsocket_rfcomm_untimeout(pcb); /* Detach DLC from the session. Does not matter which state DLC in */ LIST_REMOVE(pcb, session_next); pcb->session = NULL; /* Change DLC state and wakeup all sleepers */ pcb->state = NG_BTSOCKET_RFCOMM_DLC_CLOSED; pcb->so->so_error = error; soisdisconnected(pcb->so); wakeup(&pcb->state); /* Check if we have any DLCs left on the session */ if (LIST_EMPTY(&s->dlcs) && INITIATOR(s)) { NG_BTSOCKET_RFCOMM_INFO( "%s: Disconnecting session, state=%d, flags=%#x, mtu=%d\n", __func__, s->state, s->flags, s->mtu); switch (s->state) { case NG_BTSOCKET_RFCOMM_SESSION_CLOSED: case NG_BTSOCKET_RFCOMM_SESSION_DISCONNECTING: /* * Do not have to do anything here. 
We can get here * when L2CAP connection was terminated or we have * received DISC on multiplexor channel */ break; case NG_BTSOCKET_RFCOMM_SESSION_OPEN: /* Send DISC on multiplexor channel */ error = ng_btsocket_rfcomm_send_command(s, RFCOMM_FRAME_DISC, 0); if (error == 0) { s->state = NG_BTSOCKET_RFCOMM_SESSION_DISCONNECTING; break; } /* FALL THROUGH */ case NG_BTSOCKET_RFCOMM_SESSION_CONNECTING: case NG_BTSOCKET_RFCOMM_SESSION_CONNECTED: s->state = NG_BTSOCKET_RFCOMM_SESSION_CLOSED; break; /* case NG_BTSOCKET_RFCOMM_SESSION_LISTENING: */ default: panic("%s: Invalid session state=%d, flags=%#x\n", __func__, s->state, s->flags); break; } ng_btsocket_rfcomm_task_wakeup(); } } /* ng_btsocket_rfcomm_pcb_kill */ /* * Look for given dlci for given RFCOMM session. Caller must hold s->session_mtx */ static ng_btsocket_rfcomm_pcb_p ng_btsocket_rfcomm_pcb_by_dlci(ng_btsocket_rfcomm_session_p s, int dlci) { ng_btsocket_rfcomm_pcb_p pcb = NULL; mtx_assert(&s->session_mtx, MA_OWNED); LIST_FOREACH(pcb, &s->dlcs, session_next) if (pcb->dlci == dlci) break; return (pcb); } /* ng_btsocket_rfcomm_pcb_by_dlci */ /* * Look for socket that listens on given src address and given channel */ static ng_btsocket_rfcomm_pcb_p ng_btsocket_rfcomm_pcb_listener(bdaddr_p src, int channel) { ng_btsocket_rfcomm_pcb_p pcb = NULL, pcb1 = NULL; mtx_lock(&ng_btsocket_rfcomm_sockets_mtx); LIST_FOREACH(pcb, &ng_btsocket_rfcomm_sockets, next) { if (pcb->channel != channel || !SOLISTENING(pcb->so)) continue; if (bcmp(&pcb->src, src, sizeof(*src)) == 0) break; if (bcmp(&pcb->src, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) pcb1 = pcb; } mtx_unlock(&ng_btsocket_rfcomm_sockets_mtx); return ((pcb != NULL)? pcb : pcb1); } /* ng_btsocket_rfcomm_pcb_listener */ /***************************************************************************** ***************************************************************************** ** Misc. functions ***************************************************************************** *****************************************************************************/ /* * Set timeout. Caller MUST hold pcb_mtx */ static void ng_btsocket_rfcomm_timeout(ng_btsocket_rfcomm_pcb_p pcb) { mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (!(pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMO)) { pcb->flags |= NG_BTSOCKET_RFCOMM_DLC_TIMO; pcb->flags &= ~NG_BTSOCKET_RFCOMM_DLC_TIMEDOUT; callout_reset(&pcb->timo, ng_btsocket_rfcomm_timo * hz, ng_btsocket_rfcomm_process_timeout, pcb); } else panic("%s: Duplicated socket timeout?!\n", __func__); } /* ng_btsocket_rfcomm_timeout */ /* * Unset pcb timeout. 
Caller MUST hold pcb_mtx */ static void ng_btsocket_rfcomm_untimeout(ng_btsocket_rfcomm_pcb_p pcb) { mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->flags & NG_BTSOCKET_RFCOMM_DLC_TIMO) { callout_stop(&pcb->timo); pcb->flags &= ~NG_BTSOCKET_RFCOMM_DLC_TIMO; pcb->flags &= ~NG_BTSOCKET_RFCOMM_DLC_TIMEDOUT; } else panic("%s: No socket timeout?!\n", __func__); } /* ng_btsocket_rfcomm_untimeout */ /* * Process pcb timeout */ static void ng_btsocket_rfcomm_process_timeout(void *xpcb) { ng_btsocket_rfcomm_pcb_p pcb = (ng_btsocket_rfcomm_pcb_p) xpcb; mtx_assert(&pcb->pcb_mtx, MA_OWNED); NG_BTSOCKET_RFCOMM_INFO( "%s: Timeout, so=%p, dlci=%d, state=%d, flags=%#x\n", __func__, pcb->so, pcb->dlci, pcb->state, pcb->flags); pcb->flags &= ~NG_BTSOCKET_RFCOMM_DLC_TIMO; pcb->flags |= NG_BTSOCKET_RFCOMM_DLC_TIMEDOUT; switch (pcb->state) { case NG_BTSOCKET_RFCOMM_DLC_CONFIGURING: case NG_BTSOCKET_RFCOMM_DLC_CONNECTING: pcb->state = NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING; break; case NG_BTSOCKET_RFCOMM_DLC_W4_CONNECT: case NG_BTSOCKET_RFCOMM_DLC_DISCONNECTING: break; default: panic( "%s: DLC timeout in invalid state, dlci=%d, state=%d, flags=%#x\n", __func__, pcb->dlci, pcb->state, pcb->flags); break; } ng_btsocket_rfcomm_task_wakeup(); } /* ng_btsocket_rfcomm_process_timeout */ /* * Get up to length bytes from the socket buffer */ static struct mbuf * ng_btsocket_rfcomm_prepare_packet(struct sockbuf *sb, int length) { struct mbuf *top = NULL, *m = NULL, *n = NULL, *nextpkt = NULL; int mlen, noff, len; MGETHDR(top, M_NOWAIT, MT_DATA); if (top == NULL) return (NULL); top->m_pkthdr.len = length; top->m_len = 0; mlen = MHLEN; m = top; n = sb->sb_mb; nextpkt = n->m_nextpkt; noff = 0; while (length > 0 && n != NULL) { len = min(mlen - m->m_len, n->m_len - noff); if (len > length) len = length; bcopy(mtod(n, caddr_t)+noff, mtod(m, caddr_t)+m->m_len, len); m->m_len += len; noff += len; length -= len; if (length > 0 && m->m_len == mlen) { MGET(m->m_next, M_NOWAIT, MT_DATA); if (m->m_next == NULL) { NG_FREE_M(top); return (NULL); } m = m->m_next; m->m_len = 0; mlen = MLEN; } if (noff == n->m_len) { noff = 0; n = n->m_next; if (n == NULL) n = nextpkt; nextpkt = (n != NULL)? n->m_nextpkt : NULL; } } if (length < 0) panic("%s: length=%d\n", __func__, length); if (length > 0 && n == NULL) panic("%s: bogus length=%d, n=%p\n", __func__, length, n); return (top); } /* ng_btsocket_rfcomm_prepare_packet */ diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_sco.c b/sys/netgraph/bluetooth/socket/ng_btsocket_sco.c index d9700f91c132..115dd6d87ec4 100644 --- a/sys/netgraph/bluetooth/socket/ng_btsocket_sco.c +++ b/sys/netgraph/bluetooth/socket/ng_btsocket_sco.c @@ -1,1988 +1,1962 @@ /* * ng_btsocket_sco.c */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_btsocket_sco.c,v 1.2 2005/10/31 18:08:51 max Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* MALLOC define */ #ifdef NG_SEPARATE_MALLOC static MALLOC_DEFINE(M_NETGRAPH_BTSOCKET_SCO, "netgraph_btsocks_sco", "Netgraph Bluetooth SCO sockets"); #else #define M_NETGRAPH_BTSOCKET_SCO M_NETGRAPH #endif /* NG_SEPARATE_MALLOC */ /* Netgraph node methods */ static ng_constructor_t ng_btsocket_sco_node_constructor; static ng_rcvmsg_t ng_btsocket_sco_node_rcvmsg; static ng_shutdown_t ng_btsocket_sco_node_shutdown; static ng_newhook_t ng_btsocket_sco_node_newhook; static ng_connect_t ng_btsocket_sco_node_connect; static ng_rcvdata_t ng_btsocket_sco_node_rcvdata; static ng_disconnect_t ng_btsocket_sco_node_disconnect; static void ng_btsocket_sco_input (void *, int); static void ng_btsocket_sco_rtclean (void *, int); /* Netgraph type descriptor */ static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_BTSOCKET_SCO_NODE_TYPE, .constructor = ng_btsocket_sco_node_constructor, .rcvmsg = ng_btsocket_sco_node_rcvmsg, .shutdown = ng_btsocket_sco_node_shutdown, .newhook = ng_btsocket_sco_node_newhook, .connect = ng_btsocket_sco_node_connect, .rcvdata = ng_btsocket_sco_node_rcvdata, .disconnect = ng_btsocket_sco_node_disconnect, }; /* Globals */ static u_int32_t ng_btsocket_sco_debug_level; static node_p ng_btsocket_sco_node; static struct ng_bt_itemq ng_btsocket_sco_queue; static struct mtx ng_btsocket_sco_queue_mtx; static struct task ng_btsocket_sco_queue_task; static struct mtx ng_btsocket_sco_sockets_mtx; static LIST_HEAD(, ng_btsocket_sco_pcb) ng_btsocket_sco_sockets; static LIST_HEAD(, ng_btsocket_sco_rtentry) ng_btsocket_sco_rt; static struct mtx ng_btsocket_sco_rt_mtx; static struct task ng_btsocket_sco_rt_task; static struct timeval ng_btsocket_sco_lasttime; static int ng_btsocket_sco_curpps; /* Sysctl tree */ SYSCTL_DECL(_net_bluetooth_sco_sockets); static SYSCTL_NODE(_net_bluetooth_sco_sockets, OID_AUTO, seq, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Bluetooth SEQPACKET SCO sockets family"); SYSCTL_UINT(_net_bluetooth_sco_sockets_seq, OID_AUTO, debug_level, CTLFLAG_RW, &ng_btsocket_sco_debug_level, NG_BTSOCKET_WARN_LEVEL, "Bluetooth SEQPACKET SCO sockets debug level"); SYSCTL_UINT(_net_bluetooth_sco_sockets_seq, OID_AUTO, queue_len, CTLFLAG_RD, &ng_btsocket_sco_queue.len, 0, "Bluetooth SEQPACKET SCO sockets input queue length"); SYSCTL_UINT(_net_bluetooth_sco_sockets_seq, OID_AUTO, queue_maxlen, CTLFLAG_RD, &ng_btsocket_sco_queue.maxlen, 0, "Bluetooth SEQPACKET SCO sockets input queue max. 
length"); SYSCTL_UINT(_net_bluetooth_sco_sockets_seq, OID_AUTO, queue_drops, CTLFLAG_RD, &ng_btsocket_sco_queue.drops, 0, "Bluetooth SEQPACKET SCO sockets input queue drops"); /* Debug */ #define NG_BTSOCKET_SCO_INFO \ if (ng_btsocket_sco_debug_level >= NG_BTSOCKET_INFO_LEVEL && \ ppsratecheck(&ng_btsocket_sco_lasttime, &ng_btsocket_sco_curpps, 1)) \ printf #define NG_BTSOCKET_SCO_WARN \ if (ng_btsocket_sco_debug_level >= NG_BTSOCKET_WARN_LEVEL && \ ppsratecheck(&ng_btsocket_sco_lasttime, &ng_btsocket_sco_curpps, 1)) \ printf #define NG_BTSOCKET_SCO_ERR \ if (ng_btsocket_sco_debug_level >= NG_BTSOCKET_ERR_LEVEL && \ ppsratecheck(&ng_btsocket_sco_lasttime, &ng_btsocket_sco_curpps, 1)) \ printf #define NG_BTSOCKET_SCO_ALERT \ if (ng_btsocket_sco_debug_level >= NG_BTSOCKET_ALERT_LEVEL && \ ppsratecheck(&ng_btsocket_sco_lasttime, &ng_btsocket_sco_curpps, 1)) \ printf /* * Netgraph message processing routines */ static int ng_btsocket_sco_process_lp_con_cfm (struct ng_mesg *, ng_btsocket_sco_rtentry_p); static int ng_btsocket_sco_process_lp_con_ind (struct ng_mesg *, ng_btsocket_sco_rtentry_p); static int ng_btsocket_sco_process_lp_discon_ind (struct ng_mesg *, ng_btsocket_sco_rtentry_p); /* * Send LP messages to the lower layer */ static int ng_btsocket_sco_send_lp_con_req (ng_btsocket_sco_pcb_p); static int ng_btsocket_sco_send_lp_con_rsp (ng_btsocket_sco_rtentry_p, bdaddr_p, int); static int ng_btsocket_sco_send_lp_discon_req (ng_btsocket_sco_pcb_p); static int ng_btsocket_sco_send2 (ng_btsocket_sco_pcb_p); /* * Timeout processing routines */ static void ng_btsocket_sco_timeout (ng_btsocket_sco_pcb_p); static void ng_btsocket_sco_untimeout (ng_btsocket_sco_pcb_p); static void ng_btsocket_sco_process_timeout (void *); /* * Other stuff */ static ng_btsocket_sco_pcb_p ng_btsocket_sco_pcb_by_addr(bdaddr_p); static ng_btsocket_sco_pcb_p ng_btsocket_sco_pcb_by_handle(bdaddr_p, int); static ng_btsocket_sco_pcb_p ng_btsocket_sco_pcb_by_addrs(bdaddr_p, bdaddr_p); #define ng_btsocket_sco_wakeup_input_task() \ taskqueue_enqueue(taskqueue_swi, &ng_btsocket_sco_queue_task) #define ng_btsocket_sco_wakeup_route_task() \ taskqueue_enqueue(taskqueue_swi, &ng_btsocket_sco_rt_task) /***************************************************************************** ***************************************************************************** ** Netgraph node interface ***************************************************************************** *****************************************************************************/ /* * Netgraph node constructor. Do not allow to create node of this type. */ static int ng_btsocket_sco_node_constructor(node_p node) { return (EINVAL); } /* ng_btsocket_sco_node_constructor */ /* * Do local shutdown processing. Let old node go and create new fresh one. */ static int ng_btsocket_sco_node_shutdown(node_p node) { int error = 0; NG_NODE_UNREF(node); /* Create new node */ error = ng_make_node_common(&typestruct, &ng_btsocket_sco_node); if (error != 0) { NG_BTSOCKET_SCO_ALERT( "%s: Could not create Netgraph node, error=%d\n", __func__, error); ng_btsocket_sco_node = NULL; return (error); } error = ng_name_node(ng_btsocket_sco_node, NG_BTSOCKET_SCO_NODE_TYPE); if (error != 0) { NG_BTSOCKET_SCO_ALERT( "%s: Could not name Netgraph node, error=%d\n", __func__, error); NG_NODE_UNREF(ng_btsocket_sco_node); ng_btsocket_sco_node = NULL; return (error); } return (0); } /* ng_btsocket_sco_node_shutdown */ /* * We allow any hook to be connected to the node. 
*/ static int ng_btsocket_sco_node_newhook(node_p node, hook_p hook, char const *name) { return (0); } /* ng_btsocket_sco_node_newhook */ /* * Just say "YEP, that's OK by me!" */ static int ng_btsocket_sco_node_connect(hook_p hook) { NG_HOOK_SET_PRIVATE(hook, NULL); NG_HOOK_REF(hook); /* Keep extra reference to the hook */ #if 0 NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); NG_HOOK_FORCE_QUEUE(hook); #endif return (0); } /* ng_btsocket_sco_node_connect */ /* * Hook disconnection. Schedule route cleanup task */ static int ng_btsocket_sco_node_disconnect(hook_p hook) { /* * If hook has private information then we must have this hook in * the routing table and must schedule cleaning for the routing table. * Otherwise hook was connected but we never got "hook_info" message, * so we have never added this hook to the routing table and it is safe * to just delete it. */ if (NG_HOOK_PRIVATE(hook) != NULL) return (ng_btsocket_sco_wakeup_route_task()); NG_HOOK_UNREF(hook); /* Remove extra reference */ return (0); } /* ng_btsocket_sco_node_disconnect */ /* * Process incoming messages */ static int ng_btsocket_sco_node_rcvmsg(node_p node, item_p item, hook_p hook) { struct ng_mesg *msg = NGI_MSG(item); /* item still has message */ int error = 0; if (msg != NULL && msg->header.typecookie == NGM_HCI_COOKIE) { mtx_lock(&ng_btsocket_sco_queue_mtx); if (NG_BT_ITEMQ_FULL(&ng_btsocket_sco_queue)) { NG_BTSOCKET_SCO_ERR( "%s: Input queue is full (msg)\n", __func__); NG_BT_ITEMQ_DROP(&ng_btsocket_sco_queue); NG_FREE_ITEM(item); error = ENOBUFS; } else { if (hook != NULL) { NG_HOOK_REF(hook); NGI_SET_HOOK(item, hook); } NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_sco_queue, item); error = ng_btsocket_sco_wakeup_input_task(); } mtx_unlock(&ng_btsocket_sco_queue_mtx); } else { NG_FREE_ITEM(item); error = EINVAL; } return (error); } /* ng_btsocket_sco_node_rcvmsg */ /* * Receive data on a hook */ static int ng_btsocket_sco_node_rcvdata(hook_p hook, item_p item) { int error = 0; mtx_lock(&ng_btsocket_sco_queue_mtx); if (NG_BT_ITEMQ_FULL(&ng_btsocket_sco_queue)) { NG_BTSOCKET_SCO_ERR( "%s: Input queue is full (data)\n", __func__); NG_BT_ITEMQ_DROP(&ng_btsocket_sco_queue); NG_FREE_ITEM(item); error = ENOBUFS; } else { NG_HOOK_REF(hook); NGI_SET_HOOK(item, hook); NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_sco_queue, item); error = ng_btsocket_sco_wakeup_input_task(); } mtx_unlock(&ng_btsocket_sco_queue_mtx); return (error); } /* ng_btsocket_sco_node_rcvdata */ /* * Process LP_ConnectCfm event from the lower layer protocol */ static int ng_btsocket_sco_process_lp_con_cfm(struct ng_mesg *msg, ng_btsocket_sco_rtentry_p rt) { ng_hci_lp_con_cfm_ep *ep = NULL; ng_btsocket_sco_pcb_t *pcb = NULL; int error = 0; if (msg->header.arglen != sizeof(*ep)) return (EMSGSIZE); ep = (ng_hci_lp_con_cfm_ep *)(msg->data); mtx_lock(&ng_btsocket_sco_sockets_mtx); /* Look for the socket with the token */ pcb = ng_btsocket_sco_pcb_by_addrs(&rt->src, &ep->bdaddr); if (pcb == NULL) { mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (ENOENT); } /* pcb is locked */ NG_BTSOCKET_SCO_INFO( "%s: Got LP_ConnectCfm response, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, status=%d, handle=%d, state=%d\n", __func__, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], ep->status, ep->con_handle, pcb->state); if (pcb->state != NG_BTSOCKET_SCO_CONNECTING) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (ENOENT); 
} ng_btsocket_sco_untimeout(pcb); if (ep->status == 0) { /* * Connection is open. Update connection handle and * socket state */ pcb->con_handle = ep->con_handle; pcb->state = NG_BTSOCKET_SCO_OPEN; soisconnected(pcb->so); } else { /* * We have failed to open connection, so disconnect the socket */ pcb->so->so_error = ECONNREFUSED; /* XXX convert status ??? */ pcb->state = NG_BTSOCKET_SCO_CLOSED; soisdisconnected(pcb->so); } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (error); } /* ng_btsocket_sco_process_lp_con_cfm */ /* * Process LP_ConnectInd indicator. Find socket that listens on address. * Find exact or closest match. */ static int ng_btsocket_sco_process_lp_con_ind(struct ng_mesg *msg, ng_btsocket_sco_rtentry_p rt) { ng_hci_lp_con_ind_ep *ep = NULL; ng_btsocket_sco_pcb_t *pcb = NULL, *pcb1 = NULL; int error = 0; u_int16_t status = 0; if (msg->header.arglen != sizeof(*ep)) return (EMSGSIZE); ep = (ng_hci_lp_con_ind_ep *)(msg->data); NG_BTSOCKET_SCO_INFO( "%s: Got LP_ConnectInd indicator, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], ep->bdaddr.b[5], ep->bdaddr.b[4], ep->bdaddr.b[3], ep->bdaddr.b[2], ep->bdaddr.b[1], ep->bdaddr.b[0]); mtx_lock(&ng_btsocket_sco_sockets_mtx); pcb = ng_btsocket_sco_pcb_by_addr(&rt->src); if (pcb != NULL) { struct socket *so1; /* pcb is locked */ CURVNET_SET(pcb->so->so_vnet); so1 = sonewconn(pcb->so, 0); CURVNET_RESTORE(); if (so1 == NULL) { status = 0x0d; /* Rejected due to limited resources */ goto respond; } /* * If we got here then we have created a new socket. So complete * connection. If we were listening on specific address then copy * source address from listening socket, otherwise copy source * address from hook's routing information. */ pcb1 = so2sco_pcb(so1); KASSERT((pcb1 != NULL), ("%s: pcb1 == NULL\n", __func__)); mtx_lock(&pcb1->pcb_mtx); if (bcmp(&pcb->src, NG_HCI_BDADDR_ANY, sizeof(pcb->src)) != 0) bcopy(&pcb->src, &pcb1->src, sizeof(pcb1->src)); else bcopy(&rt->src, &pcb1->src, sizeof(pcb1->src)); pcb1->flags &= ~NG_BTSOCKET_SCO_CLIENT; bcopy(&ep->bdaddr, &pcb1->dst, sizeof(pcb1->dst)); pcb1->rt = rt; } else /* Nobody listens on requested BDADDR */ status = 0x1f; /* Unspecified Error */ respond: error = ng_btsocket_sco_send_lp_con_rsp(rt, &ep->bdaddr, status); if (pcb1 != NULL) { if (error != 0) { pcb1->so->so_error = error; pcb1->state = NG_BTSOCKET_SCO_CLOSED; soisdisconnected(pcb1->so); } else { pcb1->state = NG_BTSOCKET_SCO_CONNECTING; soisconnecting(pcb1->so); ng_btsocket_sco_timeout(pcb1); } mtx_unlock(&pcb1->pcb_mtx); } if (pcb != NULL) mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (error); } /* ng_btsocket_sco_process_lp_con_ind */ /* * Process LP_DisconnectInd indicator */ static int ng_btsocket_sco_process_lp_discon_ind(struct ng_mesg *msg, ng_btsocket_sco_rtentry_p rt) { ng_hci_lp_discon_ind_ep *ep = NULL; ng_btsocket_sco_pcb_t *pcb = NULL; /* Check message */ if (msg->header.arglen != sizeof(*ep)) return (EMSGSIZE); ep = (ng_hci_lp_discon_ind_ep *)(msg->data); mtx_lock(&ng_btsocket_sco_sockets_mtx); /* Look for the socket with given channel ID */ pcb = ng_btsocket_sco_pcb_by_handle(&rt->src, ep->con_handle); if (pcb == NULL) { mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (0); } /* * Disconnect the socket. If there was any pending request we can * not do anything here anyway. 
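* The baseband connection is already gone by the time we receive this * indication, so we just cancel any pending timeout and mark the socket * disconnected.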
*/ /* pcb is locked */ NG_BTSOCKET_SCO_INFO( "%s: Got LP_DisconnectInd indicator, src bdaddr=%x:%x:%x:%x:%x:%x, " \ "dst bdaddr=%x:%x:%x:%x:%x:%x, handle=%d, state=%d\n", __func__, pcb->src.b[5], pcb->src.b[4], pcb->src.b[3], pcb->src.b[2], pcb->src.b[1], pcb->src.b[0], pcb->dst.b[5], pcb->dst.b[4], pcb->dst.b[3], pcb->dst.b[2], pcb->dst.b[1], pcb->dst.b[0], pcb->con_handle, pcb->state); if (pcb->flags & NG_BTSOCKET_SCO_TIMO) ng_btsocket_sco_untimeout(pcb); pcb->state = NG_BTSOCKET_SCO_CLOSED; soisdisconnected(pcb->so); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (0); } /* ng_btsocket_sco_process_lp_discon_ind */ /* * Send LP_ConnectReq request */ static int ng_btsocket_sco_send_lp_con_req(ng_btsocket_sco_pcb_p pcb) { struct ng_mesg *msg = NULL; ng_hci_lp_con_req_ep *ep = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->rt == NULL || pcb->rt->hook == NULL || NG_HOOK_NOT_VALID(pcb->rt->hook)) return (ENETDOWN); NG_MKMESSAGE(msg, NGM_HCI_COOKIE, NGM_HCI_LP_CON_REQ, sizeof(*ep), M_NOWAIT); if (msg == NULL) return (ENOMEM); ep = (ng_hci_lp_con_req_ep *)(msg->data); ep->link_type = NG_HCI_LINK_SCO; bcopy(&pcb->dst, &ep->bdaddr, sizeof(ep->bdaddr)); NG_SEND_MSG_HOOK(error, ng_btsocket_sco_node, msg, pcb->rt->hook, 0); return (error); } /* ng_btsocket_sco_send_lp_con_req */ /* * Send LP_ConnectRsp response */ static int ng_btsocket_sco_send_lp_con_rsp(ng_btsocket_sco_rtentry_p rt, bdaddr_p dst, int status) { struct ng_mesg *msg = NULL; ng_hci_lp_con_rsp_ep *ep = NULL; int error = 0; if (rt == NULL || rt->hook == NULL || NG_HOOK_NOT_VALID(rt->hook)) return (ENETDOWN); NG_MKMESSAGE(msg, NGM_HCI_COOKIE, NGM_HCI_LP_CON_RSP, sizeof(*ep), M_NOWAIT); if (msg == NULL) return (ENOMEM); ep = (ng_hci_lp_con_rsp_ep *)(msg->data); ep->status = status; ep->link_type = NG_HCI_LINK_SCO; bcopy(dst, &ep->bdaddr, sizeof(ep->bdaddr)); NG_SEND_MSG_HOOK(error, ng_btsocket_sco_node, msg, rt->hook, 0); return (error); } /* ng_btsocket_sco_send_lp_con_rsp */ /* * Send LP_DisconReq request */ static int ng_btsocket_sco_send_lp_discon_req(ng_btsocket_sco_pcb_p pcb) { struct ng_mesg *msg = NULL; ng_hci_lp_discon_req_ep *ep = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->rt == NULL || pcb->rt->hook == NULL || NG_HOOK_NOT_VALID(pcb->rt->hook)) return (ENETDOWN); NG_MKMESSAGE(msg, NGM_HCI_COOKIE, NGM_HCI_LP_DISCON_REQ, sizeof(*ep), M_NOWAIT); if (msg == NULL) return (ENOMEM); ep = (ng_hci_lp_discon_req_ep *)(msg->data); ep->con_handle = pcb->con_handle; ep->reason = 0x13; /* User Ended Connection */ NG_SEND_MSG_HOOK(error, ng_btsocket_sco_node, msg, pcb->rt->hook, 0); return (error); } /* ng_btsocket_sco_send_lp_discon_req */ /***************************************************************************** ***************************************************************************** ** Socket interface ***************************************************************************** *****************************************************************************/ /* * SCO sockets data input routine */ static void ng_btsocket_sco_data_input(struct mbuf *m, hook_p hook) { ng_hci_scodata_pkt_t *hdr = NULL; ng_btsocket_sco_pcb_t *pcb = NULL; ng_btsocket_sco_rtentry_t *rt = NULL; u_int16_t con_handle; if (hook == NULL) { NG_BTSOCKET_SCO_ALERT( "%s: Invalid source hook for SCO data packet\n", __func__); goto drop; } rt = (ng_btsocket_sco_rtentry_t *) NG_HOOK_PRIVATE(hook); if (rt == NULL) { NG_BTSOCKET_SCO_ALERT( "%s: Could not find out source bdaddr for SCO data 
packet\n", __func__); goto drop; } /* Make sure we can access header */ if (m->m_pkthdr.len < sizeof(*hdr)) { NG_BTSOCKET_SCO_ERR( "%s: SCO data packet too small, len=%d\n", __func__, m->m_pkthdr.len); goto drop; } if (m->m_len < sizeof(*hdr)) { m = m_pullup(m, sizeof(*hdr)); if (m == NULL) goto drop; } /* Strip SCO packet header and verify packet length */ hdr = mtod(m, ng_hci_scodata_pkt_t *); m_adj(m, sizeof(*hdr)); if (hdr->length != m->m_pkthdr.len) { NG_BTSOCKET_SCO_ERR( "%s: Bad SCO data packet length, len=%d, length=%d\n", __func__, m->m_pkthdr.len, hdr->length); goto drop; } /* * Now process packet */ con_handle = NG_HCI_CON_HANDLE(le16toh(hdr->con_handle)); NG_BTSOCKET_SCO_INFO( "%s: Received SCO data packet: src bdaddr=%x:%x:%x:%x:%x:%x, handle=%d, " \ "length=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], con_handle, hdr->length); mtx_lock(&ng_btsocket_sco_sockets_mtx); /* Find socket */ pcb = ng_btsocket_sco_pcb_by_handle(&rt->src, con_handle); if (pcb == NULL) { mtx_unlock(&ng_btsocket_sco_sockets_mtx); goto drop; } /* pcb is locked */ if (pcb->state != NG_BTSOCKET_SCO_OPEN) { NG_BTSOCKET_SCO_ERR( "%s: No connected socket found, src bdaddr=%x:%x:%x:%x:%x:%x, state=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], pcb->state); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); goto drop; } /* Check if we have enough space in socket receive queue */ if (m->m_pkthdr.len > sbspace(&pcb->so->so_rcv)) { NG_BTSOCKET_SCO_ERR( "%s: Not enough space in socket receive queue. Dropping SCO data packet, " \ "src bdaddr=%x:%x:%x:%x:%x:%x, len=%d, space=%ld\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], m->m_pkthdr.len, sbspace(&pcb->so->so_rcv)); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); goto drop; } /* Append packet to the socket receive queue and wakeup */ sbappendrecord(&pcb->so->so_rcv, m); m = NULL; sorwakeup(pcb->so); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); drop: NG_FREE_M(m); /* checks for m != NULL */ } /* ng_btsocket_sco_data_input */ /* * SCO sockets default message input routine */ static void ng_btsocket_sco_default_msg_input(struct ng_mesg *msg, hook_p hook) { ng_btsocket_sco_rtentry_t *rt = NULL; if (hook == NULL || NG_HOOK_NOT_VALID(hook)) return; rt = (ng_btsocket_sco_rtentry_t *) NG_HOOK_PRIVATE(hook); switch (msg->header.cmd) { case NGM_HCI_NODE_UP: { ng_hci_node_up_ep *ep = NULL; if (msg->header.arglen != sizeof(*ep)) break; ep = (ng_hci_node_up_ep *)(msg->data); if (bcmp(&ep->bdaddr, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) break; if (rt == NULL) { rt = malloc(sizeof(*rt), M_NETGRAPH_BTSOCKET_SCO, M_NOWAIT|M_ZERO); if (rt == NULL) break; NG_HOOK_SET_PRIVATE(hook, rt); mtx_lock(&ng_btsocket_sco_rt_mtx); LIST_INSERT_HEAD(&ng_btsocket_sco_rt, rt, next); } else mtx_lock(&ng_btsocket_sco_rt_mtx); bcopy(&ep->bdaddr, &rt->src, sizeof(rt->src)); rt->pkt_size = (ep->pkt_size == 0)? 
60 : ep->pkt_size; rt->num_pkts = ep->num_pkts; rt->hook = hook; mtx_unlock(&ng_btsocket_sco_rt_mtx); NG_BTSOCKET_SCO_INFO( "%s: Updating hook \"%s\", src bdaddr=%x:%x:%x:%x:%x:%x, pkt_size=%d, " \ "num_pkts=%d\n", __func__, NG_HOOK_NAME(hook), rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], rt->pkt_size, rt->num_pkts); } break; case NGM_HCI_SYNC_CON_QUEUE: { ng_hci_sync_con_queue_ep *ep = NULL; ng_btsocket_sco_pcb_t *pcb = NULL; if (rt == NULL || msg->header.arglen != sizeof(*ep)) break; ep = (ng_hci_sync_con_queue_ep *)(msg->data); rt->pending -= ep->completed; if (rt->pending < 0) { NG_BTSOCKET_SCO_WARN( "%s: Pending packet counter is out of sync! bdaddr=%x:%x:%x:%x:%x:%x, " \ "handle=%d, pending=%d, completed=%d\n", __func__, rt->src.b[5], rt->src.b[4], rt->src.b[3], rt->src.b[2], rt->src.b[1], rt->src.b[0], ep->con_handle, rt->pending, ep->completed); rt->pending = 0; } mtx_lock(&ng_btsocket_sco_sockets_mtx); /* Find socket */ pcb = ng_btsocket_sco_pcb_by_handle(&rt->src, ep->con_handle); if (pcb == NULL) { mtx_unlock(&ng_btsocket_sco_sockets_mtx); break; } /* pcb is locked */ /* Check state */ if (pcb->state == NG_BTSOCKET_SCO_OPEN) { /* Remove timeout */ ng_btsocket_sco_untimeout(pcb); /* Drop completed packets from the send queue */ for (; ep->completed > 0; ep->completed --) sbdroprecord(&pcb->so->so_snd); /* Send more if we have any */ if (sbavail(&pcb->so->so_snd) > 0) if (ng_btsocket_sco_send2(pcb) == 0) ng_btsocket_sco_timeout(pcb); /* Wake up writers */ sowwakeup(pcb->so); } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); } break; default: NG_BTSOCKET_SCO_WARN( "%s: Unknown message, cmd=%d\n", __func__, msg->header.cmd); break; } NG_FREE_MSG(msg); /* Checks for msg != NULL */ } /* ng_btsocket_sco_default_msg_input */ /* * SCO sockets LP message input routine */ static void ng_btsocket_sco_lp_msg_input(struct ng_mesg *msg, hook_p hook) { ng_btsocket_sco_rtentry_p rt = NULL; if (hook == NULL) { NG_BTSOCKET_SCO_ALERT( "%s: Invalid source hook for LP message\n", __func__); goto drop; } rt = (ng_btsocket_sco_rtentry_p) NG_HOOK_PRIVATE(hook); if (rt == NULL) { NG_BTSOCKET_SCO_ALERT( "%s: Could not find out source bdaddr for LP message\n", __func__); goto drop; } switch (msg->header.cmd) { case NGM_HCI_LP_CON_CFM: /* Connection Confirmation Event */ ng_btsocket_sco_process_lp_con_cfm(msg, rt); break; case NGM_HCI_LP_CON_IND: /* Connection Indication Event */ ng_btsocket_sco_process_lp_con_ind(msg, rt); break; case NGM_HCI_LP_DISCON_IND: /* Disconnection Indication Event */ ng_btsocket_sco_process_lp_discon_ind(msg, rt); break; /* XXX FIXME add other LP messages */ default: NG_BTSOCKET_SCO_WARN( "%s: Unknown LP message, cmd=%d\n", __func__, msg->header.cmd); break; } drop: NG_FREE_MSG(msg); } /* ng_btsocket_sco_lp_msg_input */ /* * SCO sockets input routine */ static void ng_btsocket_sco_input(void *context, int pending) { item_p item = NULL; hook_p hook = NULL; for (;;) { mtx_lock(&ng_btsocket_sco_queue_mtx); NG_BT_ITEMQ_DEQUEUE(&ng_btsocket_sco_queue, item); mtx_unlock(&ng_btsocket_sco_queue_mtx); if (item == NULL) break; NGI_GET_HOOK(item, hook); if (hook != NULL && NG_HOOK_NOT_VALID(hook)) goto drop; switch(item->el_flags & NGQF_TYPE) { case NGQF_DATA: { struct mbuf *m = NULL; NGI_GET_M(item, m); ng_btsocket_sco_data_input(m, hook); } break; case NGQF_MESG: { struct ng_mesg *msg = NULL; NGI_GET_MSG(item, msg); switch (msg->header.cmd) { case NGM_HCI_LP_CON_CFM: case NGM_HCI_LP_CON_IND: case NGM_HCI_LP_DISCON_IND: /* XXX 
FIXME add other LP messages */ ng_btsocket_sco_lp_msg_input(msg, hook); break; default: ng_btsocket_sco_default_msg_input(msg, hook); break; } } break; default: KASSERT(0, ("%s: invalid item type=%ld\n", __func__, (item->el_flags & NGQF_TYPE))); break; } drop: if (hook != NULL) NG_HOOK_UNREF(hook); NG_FREE_ITEM(item); } } /* ng_btsocket_sco_input */ /* * Route cleanup task. Gets scheduled when hook is disconnected. Here we * will find all sockets that use "invalid" hook and disconnect them. */ static void ng_btsocket_sco_rtclean(void *context, int pending) { ng_btsocket_sco_pcb_p pcb = NULL, pcb_next = NULL; ng_btsocket_sco_rtentry_p rt = NULL; /* * First disconnect all sockets that use "invalid" hook */ mtx_lock(&ng_btsocket_sco_sockets_mtx); for(pcb = LIST_FIRST(&ng_btsocket_sco_sockets); pcb != NULL; ) { mtx_lock(&pcb->pcb_mtx); pcb_next = LIST_NEXT(pcb, next); if (pcb->rt != NULL && pcb->rt->hook != NULL && NG_HOOK_NOT_VALID(pcb->rt->hook)) { if (pcb->flags & NG_BTSOCKET_SCO_TIMO) ng_btsocket_sco_untimeout(pcb); pcb->rt = NULL; pcb->so->so_error = ENETDOWN; pcb->state = NG_BTSOCKET_SCO_CLOSED; soisdisconnected(pcb->so); } mtx_unlock(&pcb->pcb_mtx); pcb = pcb_next; } mtx_unlock(&ng_btsocket_sco_sockets_mtx); /* * Now cleanup routing table */ mtx_lock(&ng_btsocket_sco_rt_mtx); for (rt = LIST_FIRST(&ng_btsocket_sco_rt); rt != NULL; ) { ng_btsocket_sco_rtentry_p rt_next = LIST_NEXT(rt, next); if (rt->hook != NULL && NG_HOOK_NOT_VALID(rt->hook)) { LIST_REMOVE(rt, next); NG_HOOK_SET_PRIVATE(rt->hook, NULL); NG_HOOK_UNREF(rt->hook); /* Remove extra reference */ bzero(rt, sizeof(*rt)); free(rt, M_NETGRAPH_BTSOCKET_SCO); } rt = rt_next; } mtx_unlock(&ng_btsocket_sco_rt_mtx); } /* ng_btsocket_sco_rtclean */ /* * Initialize everything */ static void ng_btsocket_sco_init(void *arg __unused) { int error = 0; ng_btsocket_sco_node = NULL; ng_btsocket_sco_debug_level = NG_BTSOCKET_WARN_LEVEL; /* Register Netgraph node type */ error = ng_newtype(&typestruct); if (error != 0) { NG_BTSOCKET_SCO_ALERT( "%s: Could not register Netgraph node type, error=%d\n", __func__, error); return; } /* Create Netgrapg node */ error = ng_make_node_common(&typestruct, &ng_btsocket_sco_node); if (error != 0) { NG_BTSOCKET_SCO_ALERT( "%s: Could not create Netgraph node, error=%d\n", __func__, error); ng_btsocket_sco_node = NULL; return; } error = ng_name_node(ng_btsocket_sco_node, NG_BTSOCKET_SCO_NODE_TYPE); if (error != 0) { NG_BTSOCKET_SCO_ALERT( "%s: Could not name Netgraph node, error=%d\n", __func__, error); NG_NODE_UNREF(ng_btsocket_sco_node); ng_btsocket_sco_node = NULL; return; } /* Create input queue */ NG_BT_ITEMQ_INIT(&ng_btsocket_sco_queue, 300); mtx_init(&ng_btsocket_sco_queue_mtx, "btsocks_sco_queue_mtx", NULL, MTX_DEF); TASK_INIT(&ng_btsocket_sco_queue_task, 0, ng_btsocket_sco_input, NULL); /* Create list of sockets */ LIST_INIT(&ng_btsocket_sco_sockets); mtx_init(&ng_btsocket_sco_sockets_mtx, "btsocks_sco_sockets_mtx", NULL, MTX_DEF); /* Routing table */ LIST_INIT(&ng_btsocket_sco_rt); mtx_init(&ng_btsocket_sco_rt_mtx, "btsocks_sco_rt_mtx", NULL, MTX_DEF); TASK_INIT(&ng_btsocket_sco_rt_task, 0, ng_btsocket_sco_rtclean, NULL); } /* ng_btsocket_sco_init */ SYSINIT(ng_btsocket_sco_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ng_btsocket_sco_init, NULL); /* * Abort connection on socket */ void ng_btsocket_sco_abort(struct socket *so) { so->so_error = ECONNABORTED; (void) ng_btsocket_sco_disconnect(so); } /* ng_btsocket_sco_abort */ void ng_btsocket_sco_close(struct socket *so) { (void) 
ng_btsocket_sco_disconnect(so); } /* ng_btsocket_sco_close */ /* * Create and attach new socket */ int ng_btsocket_sco_attach(struct socket *so, int proto, struct thread *td) { ng_btsocket_sco_pcb_p pcb = so2sco_pcb(so); int error; /* Check socket and protocol */ if (ng_btsocket_sco_node == NULL) return (EPROTONOSUPPORT); if (so->so_type != SOCK_SEQPACKET) return (ESOCKTNOSUPPORT); #if 0 /* XXX sonewconn() calls "pru_attach" with proto == 0 */ if (proto != 0) if (proto != BLUETOOTH_PROTO_SCO) return (EPROTONOSUPPORT); #endif /* XXX */ if (pcb != NULL) return (EISCONN); /* Reserve send and receive space if it is not reserved yet */ if ((so->so_snd.sb_hiwat == 0) || (so->so_rcv.sb_hiwat == 0)) { error = soreserve(so, NG_BTSOCKET_SCO_SENDSPACE, NG_BTSOCKET_SCO_RECVSPACE); if (error != 0) return (error); } /* Allocate the PCB */ pcb = malloc(sizeof(*pcb), M_NETGRAPH_BTSOCKET_SCO, M_NOWAIT | M_ZERO); if (pcb == NULL) return (ENOMEM); /* Link the PCB and the socket */ so->so_pcb = (caddr_t) pcb; pcb->so = so; pcb->state = NG_BTSOCKET_SCO_CLOSED; callout_init(&pcb->timo, 1); /* * Mark PCB mutex as DUPOK to prevent "duplicated lock of * the same type" message. When accepting new SCO connection * ng_btsocket_sco_process_lp_con_ind() holds both PCB mutexes * for "old" (accepting) PCB and "new" (created) PCB. */ mtx_init(&pcb->pcb_mtx, "btsocks_sco_pcb_mtx", NULL, MTX_DEF|MTX_DUPOK); /* * Add the PCB to the list * * XXX FIXME VERY IMPORTANT! * * This is totally FUBAR. We could get here in two cases: * * 1) When user calls socket() * 2) When we need to accept new incoming connection and call * sonewconn() * * In the first case we must acquire ng_btsocket_sco_sockets_mtx. * In the second case we hold ng_btsocket_sco_sockets_mtx already. * So we now need to distinguish between these cases. From reading * /sys/kern/uipc_socket2.c we can find out that sonewconn() calls * pru_attach with proto == 0 and td == NULL. For now use this fact * to figure out if we were called from socket() or from sonewconn(). */ if (td != NULL) mtx_lock(&ng_btsocket_sco_sockets_mtx); else mtx_assert(&ng_btsocket_sco_sockets_mtx, MA_OWNED); LIST_INSERT_HEAD(&ng_btsocket_sco_sockets, pcb, next); if (td != NULL) mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (0); } /* ng_btsocket_sco_attach */ /* * Bind socket */ int ng_btsocket_sco_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_sco_pcb_t *pcb = NULL; struct sockaddr_sco *sa = (struct sockaddr_sco *) nam; if (ng_btsocket_sco_node == NULL) return (EINVAL); /* Verify address */ if (sa == NULL) return (EINVAL); if (sa->sco_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if (sa->sco_len != sizeof(*sa)) return (EINVAL); mtx_lock(&ng_btsocket_sco_sockets_mtx); /* * Check if other socket has this address already (look for exact * match in bdaddr) and assign socket address if it's available. 
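 *
 * For comparison, a hypothetical connecting client would look roughly like
 * this (remote_bdaddr is a placeholder, error handling omitted):
 *
 *	struct sockaddr_sco ra;
 *	int s;
 *
 *	s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BLUETOOTH_PROTO_SCO);
 *	memset(&ra, 0, sizeof(ra));
 *	ra.sco_len = sizeof(ra);
 *	ra.sco_family = AF_BLUETOOTH;
 *	bcopy(remote_bdaddr, &ra.sco_bdaddr, sizeof(ra.sco_bdaddr));
 *	connect(s, (struct sockaddr *)&ra, sizeof(ra));
 *
 * Binding first is optional: ng_btsocket_sco_connect() below accepts a
 * wildcard source and copies the source address from the first usable
 * route entry it finds.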
*/ if (bcmp(&sa->sco_bdaddr, NG_HCI_BDADDR_ANY, sizeof(sa->sco_bdaddr)) != 0) { LIST_FOREACH(pcb, &ng_btsocket_sco_sockets, next) { mtx_lock(&pcb->pcb_mtx); if (bcmp(&pcb->src, &sa->sco_bdaddr, sizeof(bdaddr_t)) == 0) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (EADDRINUSE); } mtx_unlock(&pcb->pcb_mtx); } } pcb = so2sco_pcb(so); if (pcb == NULL) { mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (EINVAL); } mtx_lock(&pcb->pcb_mtx); bcopy(&sa->sco_bdaddr, &pcb->src, sizeof(pcb->src)); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); return (0); } /* ng_btsocket_sco_bind */ /* * Connect socket */ int ng_btsocket_sco_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { ng_btsocket_sco_pcb_t *pcb = so2sco_pcb(so); struct sockaddr_sco *sa = (struct sockaddr_sco *) nam; ng_btsocket_sco_rtentry_t *rt = NULL; int have_src, error = 0; /* Check socket */ if (pcb == NULL) return (EINVAL); if (ng_btsocket_sco_node == NULL) return (EINVAL); /* Verify address */ if (sa == NULL) return (EINVAL); if (sa->sco_family != AF_BLUETOOTH) return (EAFNOSUPPORT); if (sa->sco_len != sizeof(*sa)) return (EINVAL); if (bcmp(&sa->sco_bdaddr, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) return (EDESTADDRREQ); /* * Routing. Socket should be bound to some source address. The source * address can be ANY. Destination address must be set and it must not * be ANY. If source address is ANY then find first rtentry that has * src != dst. */ mtx_lock(&ng_btsocket_sco_rt_mtx); mtx_lock(&pcb->pcb_mtx); if (pcb->state == NG_BTSOCKET_SCO_CONNECTING) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_rt_mtx); return (EINPROGRESS); } if (bcmp(&sa->sco_bdaddr, &pcb->src, sizeof(pcb->src)) == 0) { mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_rt_mtx); return (EINVAL); } /* Send destination address and PSM */ bcopy(&sa->sco_bdaddr, &pcb->dst, sizeof(pcb->dst)); pcb->rt = NULL; have_src = bcmp(&pcb->src, NG_HCI_BDADDR_ANY, sizeof(pcb->src)); LIST_FOREACH(rt, &ng_btsocket_sco_rt, next) { if (rt->hook == NULL || NG_HOOK_NOT_VALID(rt->hook)) continue; /* Match src and dst */ if (have_src) { if (bcmp(&pcb->src, &rt->src, sizeof(rt->src)) == 0) break; } else { if (bcmp(&pcb->dst, &rt->src, sizeof(rt->src)) != 0) break; } } if (rt != NULL) { pcb->rt = rt; if (!have_src) bcopy(&rt->src, &pcb->src, sizeof(pcb->src)); } else error = EHOSTUNREACH; /* * Send LP_Connect request */ if (error == 0) { error = ng_btsocket_sco_send_lp_con_req(pcb); if (error == 0) { pcb->flags |= NG_BTSOCKET_SCO_CLIENT; pcb->state = NG_BTSOCKET_SCO_CONNECTING; soisconnecting(pcb->so); ng_btsocket_sco_timeout(pcb); } } mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_rt_mtx); return (error); } /* ng_btsocket_sco_connect */ /* * Process ioctl's calls on socket */ int ng_btsocket_sco_control(struct socket *so, u_long cmd, void *data, struct ifnet *ifp, struct thread *td) { return (EINVAL); } /* ng_btsocket_sco_control */ /* * Process getsockopt/setsockopt system calls */ int ng_btsocket_sco_ctloutput(struct socket *so, struct sockopt *sopt) { ng_btsocket_sco_pcb_p pcb = so2sco_pcb(so); int error, tmp; if (ng_btsocket_sco_node == NULL) return (EINVAL); if (pcb == NULL) return (EINVAL); if (sopt->sopt_level != SOL_SCO) return (0); mtx_lock(&pcb->pcb_mtx); switch (sopt->sopt_dir) { case SOPT_GET: if (pcb->state != NG_BTSOCKET_SCO_OPEN) { error = ENOTCONN; break; } switch (sopt->sopt_name) { case SO_SCO_MTU: tmp = pcb->rt->pkt_size; error = sooptcopyout(sopt, &tmp, sizeof(tmp)); break; case 
SO_SCO_CONNINFO: tmp = pcb->con_handle; error = sooptcopyout(sopt, &tmp, sizeof(tmp)); break; default: error = EINVAL; break; } break; case SOPT_SET: error = ENOPROTOOPT; break; default: error = EINVAL; break; } mtx_unlock(&pcb->pcb_mtx); return (error); } /* ng_btsocket_sco_ctloutput */ /* * Detach and destroy socket */ void ng_btsocket_sco_detach(struct socket *so) { ng_btsocket_sco_pcb_p pcb = so2sco_pcb(so); KASSERT(pcb != NULL, ("ng_btsocket_sco_detach: pcb == NULL")); if (ng_btsocket_sco_node == NULL) return; mtx_lock(&ng_btsocket_sco_sockets_mtx); mtx_lock(&pcb->pcb_mtx); if (pcb->flags & NG_BTSOCKET_SCO_TIMO) ng_btsocket_sco_untimeout(pcb); if (pcb->state == NG_BTSOCKET_SCO_OPEN) ng_btsocket_sco_send_lp_discon_req(pcb); pcb->state = NG_BTSOCKET_SCO_CLOSED; LIST_REMOVE(pcb, next); mtx_unlock(&pcb->pcb_mtx); mtx_unlock(&ng_btsocket_sco_sockets_mtx); mtx_destroy(&pcb->pcb_mtx); bzero(pcb, sizeof(*pcb)); free(pcb, M_NETGRAPH_BTSOCKET_SCO); soisdisconnected(so); so->so_pcb = NULL; } /* ng_btsocket_sco_detach */ /* * Disconnect socket */ int ng_btsocket_sco_disconnect(struct socket *so) { ng_btsocket_sco_pcb_p pcb = so2sco_pcb(so); if (pcb == NULL) return (EINVAL); if (ng_btsocket_sco_node == NULL) return (EINVAL); mtx_lock(&pcb->pcb_mtx); if (pcb->state == NG_BTSOCKET_SCO_DISCONNECTING) { mtx_unlock(&pcb->pcb_mtx); return (EINPROGRESS); } if (pcb->flags & NG_BTSOCKET_SCO_TIMO) ng_btsocket_sco_untimeout(pcb); if (pcb->state == NG_BTSOCKET_SCO_OPEN) { ng_btsocket_sco_send_lp_discon_req(pcb); pcb->state = NG_BTSOCKET_SCO_DISCONNECTING; soisdisconnecting(so); ng_btsocket_sco_timeout(pcb); } else { pcb->state = NG_BTSOCKET_SCO_CLOSED; soisdisconnected(so); } mtx_unlock(&pcb->pcb_mtx); return (0); } /* ng_btsocket_sco_disconnect */ /* * Listen on socket */ int ng_btsocket_sco_listen(struct socket *so, int backlog, struct thread *td) { ng_btsocket_sco_pcb_p pcb = so2sco_pcb(so); int error; if (pcb == NULL) return (EINVAL); if (ng_btsocket_sco_node == NULL) return (EINVAL); SOCK_LOCK(so); mtx_lock(&pcb->pcb_mtx); error = solisten_proto_check(so); if (error != 0) goto out; #if 0 if (bcmp(&pcb->src, NG_HCI_BDADDR_ANY, sizeof(bdaddr_t)) == 0) { error = EDESTADDRREQ; goto out; } #endif solisten_proto(so, backlog); out: mtx_unlock(&pcb->pcb_mtx); SOCK_UNLOCK(so); return (error); } /* ng_btsocket_listen */ -static int -ng_btsocket_sco_peeraddr1(struct socket *so, struct sockaddr_sco *sa) +/* + * Return peer address for getpeername(2) or for accept(2). For the latter + * case no extra work to do here, socket must be connected and ready. + */ +int +ng_btsocket_sco_peeraddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_sco_pcb_p pcb = so2sco_pcb(so); + struct sockaddr_sco *sco = (struct sockaddr_sco *)sa; if (pcb == NULL) return (EINVAL); if (ng_btsocket_sco_node == NULL) return (EINVAL); - *sa = (struct sockaddr_sco ){ + *sco = (struct sockaddr_sco ){ .sco_len = sizeof(struct sockaddr_sco), .sco_family = AF_BLUETOOTH, }; mtx_lock(&pcb->pcb_mtx); - bcopy(&pcb->dst, &sa->sco_bdaddr, sizeof(sa->sco_bdaddr)); + bcopy(&pcb->dst, &sco->sco_bdaddr, sizeof(sco->sco_bdaddr)); mtx_unlock(&pcb->pcb_mtx); return (0); } -/* - * Get peer address - */ -int -ng_btsocket_sco_peeraddr(struct socket *so, struct sockaddr **nam) -{ - struct sockaddr_sco sa; - int error; - - error = ng_btsocket_sco_peeraddr1(so, &sa); - if (error != 0) - return (error); - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); - - return ((*nam == NULL)? ENOMEM : 0); -} - -/* - * Accept connection on socket. 
Nothing to do here, socket must be connected - * and ready, so just return peer address and be done with it. - */ -int -ng_btsocket_sco_accept(struct socket *so, struct sockaddr *sa) -{ - if (ng_btsocket_sco_node == NULL) - return (EINVAL); - - return (ng_btsocket_sco_peeraddr1(so, (struct sockaddr_sco *)sa)); -} - /* * Send data to socket */ int ng_btsocket_sco_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { ng_btsocket_sco_pcb_t *pcb = so2sco_pcb(so); int error = 0; if (ng_btsocket_sco_node == NULL) { error = ENETDOWN; goto drop; } /* Check socket and input */ if (pcb == NULL || m == NULL || control != NULL) { error = EINVAL; goto drop; } mtx_lock(&pcb->pcb_mtx); /* Make sure socket is connected */ if (pcb->state != NG_BTSOCKET_SCO_OPEN) { mtx_unlock(&pcb->pcb_mtx); error = ENOTCONN; goto drop; } /* Check route */ if (pcb->rt == NULL || pcb->rt->hook == NULL || NG_HOOK_NOT_VALID(pcb->rt->hook)) { mtx_unlock(&pcb->pcb_mtx); error = ENETDOWN; goto drop; } /* Check packet size */ if (m->m_pkthdr.len > pcb->rt->pkt_size) { NG_BTSOCKET_SCO_ERR( "%s: Packet too big, len=%d, pkt_size=%d\n", __func__, m->m_pkthdr.len, pcb->rt->pkt_size); mtx_unlock(&pcb->pcb_mtx); error = EMSGSIZE; goto drop; } /* * First put packet on socket send queue. Then check if we have * pending timeout. If we do not have timeout then we must send * packet and schedule timeout. Otherwise do nothing and wait for * NGM_HCI_SYNC_CON_QUEUE message. */ sbappendrecord(&pcb->so->so_snd, m); m = NULL; if (!(pcb->flags & NG_BTSOCKET_SCO_TIMO)) { error = ng_btsocket_sco_send2(pcb); if (error == 0) ng_btsocket_sco_timeout(pcb); else sbdroprecord(&pcb->so->so_snd); /* XXX */ } mtx_unlock(&pcb->pcb_mtx); drop: NG_FREE_M(m); /* checks for != NULL */ NG_FREE_M(control); return (error); } /* ng_btsocket_sco_send */ /* * Send first packet in the socket queue to the SCO layer */ static int ng_btsocket_sco_send2(ng_btsocket_sco_pcb_p pcb) { struct mbuf *m = NULL; ng_hci_scodata_pkt_t *hdr = NULL; int error = 0; mtx_assert(&pcb->pcb_mtx, MA_OWNED); while (pcb->rt->pending < pcb->rt->num_pkts && sbavail(&pcb->so->so_snd) > 0) { /* Get a copy of the first packet on send queue */ m = m_dup(pcb->so->so_snd.sb_mb, M_NOWAIT); if (m == NULL) { error = ENOBUFS; break; } /* Create SCO packet header */ M_PREPEND(m, sizeof(*hdr), M_NOWAIT); if (m != NULL) if (m->m_len < sizeof(*hdr)) m = m_pullup(m, sizeof(*hdr)); if (m == NULL) { error = ENOBUFS; break; } /* Fill in the header */ hdr = mtod(m, ng_hci_scodata_pkt_t *); hdr->type = NG_HCI_SCO_DATA_PKT; hdr->con_handle = htole16(NG_HCI_MK_CON_HANDLE(pcb->con_handle, 0, 0)); hdr->length = m->m_pkthdr.len - sizeof(*hdr); /* Send packet */ NG_SEND_DATA_ONLY(error, pcb->rt->hook, m); if (error != 0) break; pcb->rt->pending ++; } return ((pcb->rt->pending > 0)? 
0 : error); } /* ng_btsocket_sco_send2 */ /* * Get socket address */ int -ng_btsocket_sco_sockaddr(struct socket *so, struct sockaddr **nam) +ng_btsocket_sco_sockaddr(struct socket *so, struct sockaddr *sa) { ng_btsocket_sco_pcb_p pcb = so2sco_pcb(so); - struct sockaddr_sco sa; + struct sockaddr_sco *sco = (struct sockaddr_sco *)sa; if (pcb == NULL) return (EINVAL); if (ng_btsocket_sco_node == NULL) return (EINVAL); + *sco = (struct sockaddr_sco ){ + .sco_len = sizeof(struct sockaddr_sco), + .sco_family = AF_BLUETOOTH, + }; mtx_lock(&pcb->pcb_mtx); - bcopy(&pcb->src, &sa.sco_bdaddr, sizeof(sa.sco_bdaddr)); + bcopy(&pcb->src, &sco->sco_bdaddr, sizeof(sco->sco_bdaddr)); mtx_unlock(&pcb->pcb_mtx); - sa.sco_len = sizeof(sa); - sa.sco_family = AF_BLUETOOTH; - - *nam = sodupsockaddr((struct sockaddr *) &sa, M_NOWAIT); - - return ((*nam == NULL)? ENOMEM : 0); -} /* ng_btsocket_sco_sockaddr */ + return (0); +} /***************************************************************************** ***************************************************************************** ** Misc. functions ***************************************************************************** *****************************************************************************/ /* * Look for the socket that listens on given bdaddr. * Returns exact or close match (if any). * Caller must hold ng_btsocket_sco_sockets_mtx. * Returns with locked pcb. */ static ng_btsocket_sco_pcb_p ng_btsocket_sco_pcb_by_addr(bdaddr_p bdaddr) { ng_btsocket_sco_pcb_p p = NULL, p1 = NULL; mtx_assert(&ng_btsocket_sco_sockets_mtx, MA_OWNED); LIST_FOREACH(p, &ng_btsocket_sco_sockets, next) { mtx_lock(&p->pcb_mtx); if (p->so == NULL || !SOLISTENING(p->so)) { mtx_unlock(&p->pcb_mtx); continue; } if (bcmp(&p->src, bdaddr, sizeof(p->src)) == 0) return (p); /* return with locked pcb */ if (bcmp(&p->src, NG_HCI_BDADDR_ANY, sizeof(p->src)) == 0) p1 = p; mtx_unlock(&p->pcb_mtx); } if (p1 != NULL) mtx_lock(&p1->pcb_mtx); return (p1); } /* ng_btsocket_sco_pcb_by_addr */ /* * Look for the socket that assigned to given source address and handle. * Caller must hold ng_btsocket_sco_sockets_mtx. * Returns with locked pcb. */ static ng_btsocket_sco_pcb_p ng_btsocket_sco_pcb_by_handle(bdaddr_p src, int con_handle) { ng_btsocket_sco_pcb_p p = NULL; mtx_assert(&ng_btsocket_sco_sockets_mtx, MA_OWNED); LIST_FOREACH(p, &ng_btsocket_sco_sockets, next) { mtx_lock(&p->pcb_mtx); if (p->con_handle == con_handle && bcmp(src, &p->src, sizeof(p->src)) == 0) return (p); /* return with locked pcb */ mtx_unlock(&p->pcb_mtx); } return (NULL); } /* ng_btsocket_sco_pcb_by_handle */ /* * Look for the socket in CONNECTING state with given source and destination * addresses. Caller must hold ng_btsocket_sco_sockets_mtx. * Returns with locked pcb. 
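 *
 * A side note on pacing (an illustrative summary only, not an interface of
 * this file): ng_btsocket_sco_send2() above together with the
 * NGM_HCI_SYNC_CON_QUEUE handler behaves like a small credit window,
 * roughly:
 *
 *	while (rt->pending < rt->num_pkts and a record is queued)
 *		send one record to the hook and rt->pending++;
 *	when NGM_HCI_SYNC_CON_QUEUE reports N completed packets:
 *		rt->pending -= N, drop N acknowledged records, send again;
 *
 * so at most rt->num_pkts SCO packets are outstanding toward the
 * controller at any time, and the send timeout only covers a stalled
 * window.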
*/ static ng_btsocket_sco_pcb_p ng_btsocket_sco_pcb_by_addrs(bdaddr_p src, bdaddr_p dst) { ng_btsocket_sco_pcb_p p = NULL; mtx_assert(&ng_btsocket_sco_sockets_mtx, MA_OWNED); LIST_FOREACH(p, &ng_btsocket_sco_sockets, next) { mtx_lock(&p->pcb_mtx); if (p->state == NG_BTSOCKET_SCO_CONNECTING && bcmp(src, &p->src, sizeof(p->src)) == 0 && bcmp(dst, &p->dst, sizeof(p->dst)) == 0) return (p); /* return with locked pcb */ mtx_unlock(&p->pcb_mtx); } return (NULL); } /* ng_btsocket_sco_pcb_by_addrs */ /* * Set timeout on socket */ static void ng_btsocket_sco_timeout(ng_btsocket_sco_pcb_p pcb) { mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (!(pcb->flags & NG_BTSOCKET_SCO_TIMO)) { pcb->flags |= NG_BTSOCKET_SCO_TIMO; callout_reset(&pcb->timo, bluetooth_sco_rtx_timeout(), ng_btsocket_sco_process_timeout, pcb); } else KASSERT(0, ("%s: Duplicated socket timeout?!\n", __func__)); } /* ng_btsocket_sco_timeout */ /* * Unset timeout on socket */ static void ng_btsocket_sco_untimeout(ng_btsocket_sco_pcb_p pcb) { mtx_assert(&pcb->pcb_mtx, MA_OWNED); if (pcb->flags & NG_BTSOCKET_SCO_TIMO) { callout_stop(&pcb->timo); pcb->flags &= ~NG_BTSOCKET_SCO_TIMO; } else KASSERT(0, ("%s: No socket timeout?!\n", __func__)); } /* ng_btsocket_sco_untimeout */ /* * Process timeout on socket */ static void ng_btsocket_sco_process_timeout(void *xpcb) { ng_btsocket_sco_pcb_p pcb = (ng_btsocket_sco_pcb_p) xpcb; mtx_lock(&pcb->pcb_mtx); pcb->flags &= ~NG_BTSOCKET_SCO_TIMO; pcb->so->so_error = ETIMEDOUT; switch (pcb->state) { case NG_BTSOCKET_SCO_CONNECTING: /* Connect timeout - close the socket */ pcb->state = NG_BTSOCKET_SCO_CLOSED; soisdisconnected(pcb->so); break; case NG_BTSOCKET_SCO_OPEN: /* Send timeout - did not get NGM_HCI_SYNC_CON_QUEUE */ sbdroprecord(&pcb->so->so_snd); sowwakeup(pcb->so); /* XXX FIXME what to do with pcb->rt->pending??? */ break; case NG_BTSOCKET_SCO_DISCONNECTING: /* Disconnect timeout - disconnect the socket anyway */ pcb->state = NG_BTSOCKET_SCO_CLOSED; soisdisconnected(pcb->so); break; default: NG_BTSOCKET_SCO_ERR( "%s: Invalid socket state=%d\n", __func__, pcb->state); break; } mtx_unlock(&pcb->pcb_mtx); } /* ng_btsocket_sco_process_timeout */ diff --git a/sys/netgraph/ng_ksocket.c b/sys/netgraph/ng_ksocket.c index 23a53528833f..98eb19ab485b 100644 --- a/sys/netgraph/ng_ksocket.c +++ b/sys/netgraph/ng_ksocket.c @@ -1,1300 +1,1294 @@ /* * ng_ksocket.c */ /*- * Copyright (c) 1996-1999 Whistle Communications, Inc. * All rights reserved. * * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; * provided, however, that: * 1. Any and all reproductions of the source or object code must include the * copyright notice above and the following disclaimer of warranties; and * 2. No rights are granted, in any manner or form, to use Whistle * Communications, Inc. trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. * * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. 
* WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Archie Cobbs * $Whistle: ng_ksocket.c,v 1.1 1999/11/16 20:04:40 archie Exp $ */ /* * Kernel socket node type. This node type is basically a kernel-mode * version of a socket... kindof like the reverse of the socket node type. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef NG_SEPARATE_MALLOC static MALLOC_DEFINE(M_NETGRAPH_KSOCKET, "netgraph_ksock", "netgraph ksock node"); #else #define M_NETGRAPH_KSOCKET M_NETGRAPH #endif #define OFFSETOF(s, e) ((char *)&((s *)0)->e - (char *)((s *)0)) #define SADATA_OFFSET (OFFSETOF(struct sockaddr, sa_data)) /* Node private data */ struct ng_ksocket_private { node_p node; hook_p hook; struct socket *so; int fn_sent; /* FN call on incoming event was sent */ LIST_HEAD(, ng_ksocket_private) embryos; LIST_ENTRY(ng_ksocket_private) siblings; u_int32_t flags; u_int32_t response_token; ng_ID_t response_addr; }; typedef struct ng_ksocket_private *priv_p; /* Flags for priv_p */ #define KSF_CONNECTING 0x00000001 /* Waiting for connection complete */ #define KSF_ACCEPTING 0x00000002 /* Waiting for accept complete */ #define KSF_EOFSEEN 0x00000004 /* Have sent 0-length EOF mbuf */ #define KSF_CLONED 0x00000008 /* Cloned from an accepting socket */ #define KSF_EMBRYONIC 0x00000010 /* Cloned node with no hooks yet */ /* Netgraph node methods */ static ng_constructor_t ng_ksocket_constructor; static ng_rcvmsg_t ng_ksocket_rcvmsg; static ng_shutdown_t ng_ksocket_shutdown; static ng_newhook_t ng_ksocket_newhook; static ng_rcvdata_t ng_ksocket_rcvdata; static ng_connect_t ng_ksocket_connect; static ng_disconnect_t ng_ksocket_disconnect; /* Alias structure */ struct ng_ksocket_alias { const char *name; const int value; const int family; }; /* Protocol family aliases */ static const struct ng_ksocket_alias ng_ksocket_families[] = { { "local", PF_LOCAL }, { "inet", PF_INET }, { "inet6", PF_INET6 }, { "atm", PF_ATM }, { "divert", PF_DIVERT }, { NULL, -1 }, }; /* Socket type aliases */ static const struct ng_ksocket_alias ng_ksocket_types[] = { { "stream", SOCK_STREAM }, { "dgram", SOCK_DGRAM }, { "raw", SOCK_RAW }, { "rdm", SOCK_RDM }, { "seqpacket", SOCK_SEQPACKET }, { NULL, -1 }, }; /* Protocol aliases */ static const struct ng_ksocket_alias ng_ksocket_protos[] = { { "ip", IPPROTO_IP, PF_INET }, { "raw", IPPROTO_RAW, PF_INET }, { "icmp", IPPROTO_ICMP, PF_INET }, { "igmp", IPPROTO_IGMP, PF_INET }, { "tcp", IPPROTO_TCP, PF_INET }, { "udp", IPPROTO_UDP, PF_INET }, { "gre", IPPROTO_GRE, PF_INET }, { "esp", IPPROTO_ESP, PF_INET }, { "ah", IPPROTO_AH, PF_INET }, { "swipe", IPPROTO_SWIPE, PF_INET }, { "encap", IPPROTO_ENCAP, PF_INET }, 
{ "pim", IPPROTO_PIM, PF_INET }, { NULL, -1 }, }; /* Helper functions */ static int ng_ksocket_accept(priv_p); static int ng_ksocket_listen_upcall(struct socket *so, void *arg, int waitflag); static void ng_ksocket_listen_upcall2(node_p node, hook_p hook, void *arg1, int arg2); static int ng_ksocket_incoming(struct socket *so, void *arg, int waitflag); static int ng_ksocket_parse(const struct ng_ksocket_alias *aliases, const char *s, int family); static void ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2); /************************************************************************ STRUCT SOCKADDR PARSE TYPE ************************************************************************/ /* Get the length of the data portion of a generic struct sockaddr */ static int ng_parse_generic_sockdata_getLength(const struct ng_parse_type *type, const u_char *start, const u_char *buf) { const struct sockaddr *sa; sa = (const struct sockaddr *)(buf - SADATA_OFFSET); return (sa->sa_len < SADATA_OFFSET) ? 0 : sa->sa_len - SADATA_OFFSET; } /* Type for the variable length data portion of a generic struct sockaddr */ static const struct ng_parse_type ng_ksocket_generic_sockdata_type = { &ng_parse_bytearray_type, &ng_parse_generic_sockdata_getLength }; /* Type for a generic struct sockaddr */ static const struct ng_parse_struct_field ng_parse_generic_sockaddr_type_fields[] = { { "len", &ng_parse_uint8_type }, { "family", &ng_parse_uint8_type }, { "data", &ng_ksocket_generic_sockdata_type }, { NULL } }; static const struct ng_parse_type ng_ksocket_generic_sockaddr_type = { &ng_parse_struct_type, &ng_parse_generic_sockaddr_type_fields }; /* Convert a struct sockaddr from ASCII to binary. If its a protocol family that we specially handle, do that, otherwise defer to the generic parse type ng_ksocket_generic_sockaddr_type. 
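
   The accepted ASCII forms are what ngctl(8) or a libnetgraph caller
   supplies, e.g. "inet/192.168.0.1:1234" or a quoted local path such as
   local/"/var/run/test.sock".  A hypothetical C caller could rely on this
   parser through an ASCII message (the node path "ksock:" is a
   placeholder, error handling omitted):

	#include <netgraph.h>

	int cs, ds;

	NgMkSockNode(NULL, &cs, &ds);
	NgSendAsciiMsg(cs, "ksock:", "bind inet/127.0.0.1:9999");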
*/ static int ng_ksocket_sockaddr_parse(const struct ng_parse_type *type, const char *s, int *off, const u_char *const start, u_char *const buf, int *buflen) { struct sockaddr *const sa = (struct sockaddr *)buf; enum ng_parse_token tok; char fambuf[32]; int family, len; char *t; /* If next token is a left curly brace, use generic parse type */ if ((tok = ng_parse_get_token(s, off, &len)) == T_LBRACE) { return (*ng_ksocket_generic_sockaddr_type.supertype->parse) (&ng_ksocket_generic_sockaddr_type, s, off, start, buf, buflen); } /* Get socket address family followed by a slash */ while (isspace(s[*off])) (*off)++; if ((t = strchr(s + *off, '/')) == NULL) return (EINVAL); if ((len = t - (s + *off)) > sizeof(fambuf) - 1) return (EINVAL); strncpy(fambuf, s + *off, len); fambuf[len] = '\0'; *off += len + 1; if ((family = ng_ksocket_parse(ng_ksocket_families, fambuf, 0)) == -1) return (EINVAL); /* Set family */ if (*buflen < SADATA_OFFSET) return (ERANGE); sa->sa_family = family; /* Set family-specific data and length */ switch (sa->sa_family) { case PF_LOCAL: /* Get pathname */ { const int pathoff = OFFSETOF(struct sockaddr_un, sun_path); struct sockaddr_un *const sun = (struct sockaddr_un *)sa; int toklen, pathlen; char *path; if ((path = ng_get_string_token(s, off, &toklen, NULL)) == NULL) return (EINVAL); pathlen = strlen(path); if (pathlen > SOCK_MAXADDRLEN) { free(path, M_NETGRAPH_KSOCKET); return (E2BIG); } if (*buflen < pathoff + pathlen) { free(path, M_NETGRAPH_KSOCKET); return (ERANGE); } *off += toklen; bcopy(path, sun->sun_path, pathlen); sun->sun_len = pathoff + pathlen; free(path, M_NETGRAPH_KSOCKET); break; } case PF_INET: /* Get an IP address with optional port */ { struct sockaddr_in *const sin = (struct sockaddr_in *)sa; int i; /* Parse this: [:port] */ for (i = 0; i < 4; i++) { u_long val; char *eptr; val = strtoul(s + *off, &eptr, 10); if (val > 0xff || eptr == s + *off) return (EINVAL); *off += (eptr - (s + *off)); ((u_char *)&sin->sin_addr)[i] = (u_char)val; if (i < 3) { if (s[*off] != '.') return (EINVAL); (*off)++; } else if (s[*off] == ':') { (*off)++; val = strtoul(s + *off, &eptr, 10); if (val > 0xffff || eptr == s + *off) return (EINVAL); *off += (eptr - (s + *off)); sin->sin_port = htons(val); } else sin->sin_port = 0; } bzero(&sin->sin_zero, sizeof(sin->sin_zero)); sin->sin_len = sizeof(*sin); break; } #if 0 case PF_INET6: /* XXX implement this someday */ #endif default: return (EINVAL); } /* Done */ *buflen = sa->sa_len; return (0); } /* Convert a struct sockaddr from binary to ASCII */ static int ng_ksocket_sockaddr_unparse(const struct ng_parse_type *type, const u_char *data, int *off, char *cbuf, int cbuflen) { const struct sockaddr *sa = (const struct sockaddr *)(data + *off); int slen = 0; /* Output socket address, either in special or generic format */ switch (sa->sa_family) { case PF_LOCAL: { const int pathoff = OFFSETOF(struct sockaddr_un, sun_path); const struct sockaddr_un *sun = (const struct sockaddr_un *)sa; const int pathlen = sun->sun_len - pathoff; char pathbuf[SOCK_MAXADDRLEN + 1]; char *pathtoken; bcopy(sun->sun_path, pathbuf, pathlen); if ((pathtoken = ng_encode_string(pathbuf, pathlen)) == NULL) return (ENOMEM); slen += snprintf(cbuf, cbuflen, "local/%s", pathtoken); free(pathtoken, M_NETGRAPH_KSOCKET); if (slen >= cbuflen) return (ERANGE); *off += sun->sun_len; return (0); } case PF_INET: { const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; slen += snprintf(cbuf, cbuflen, "inet/%d.%d.%d.%d", ((const u_char *)&sin->sin_addr)[0], 
((const u_char *)&sin->sin_addr)[1], ((const u_char *)&sin->sin_addr)[2], ((const u_char *)&sin->sin_addr)[3]); if (sin->sin_port != 0) { slen += snprintf(cbuf + strlen(cbuf), cbuflen - strlen(cbuf), ":%d", (u_int)ntohs(sin->sin_port)); } if (slen >= cbuflen) return (ERANGE); *off += sizeof(*sin); return(0); } #if 0 case PF_INET6: /* XXX implement this someday */ #endif default: return (*ng_ksocket_generic_sockaddr_type.supertype->unparse) (&ng_ksocket_generic_sockaddr_type, data, off, cbuf, cbuflen); } } /* Parse type for struct sockaddr */ static const struct ng_parse_type ng_ksocket_sockaddr_type = { NULL, NULL, NULL, &ng_ksocket_sockaddr_parse, &ng_ksocket_sockaddr_unparse, NULL /* no such thing as a default struct sockaddr */ }; /************************************************************************ STRUCT NG_KSOCKET_SOCKOPT PARSE TYPE ************************************************************************/ /* Get length of the struct ng_ksocket_sockopt value field, which is the just the excess of the message argument portion over the length of the struct ng_ksocket_sockopt. */ static int ng_parse_sockoptval_getLength(const struct ng_parse_type *type, const u_char *start, const u_char *buf) { static const int offset = OFFSETOF(struct ng_ksocket_sockopt, value); const struct ng_ksocket_sockopt *sopt; const struct ng_mesg *msg; sopt = (const struct ng_ksocket_sockopt *)(buf - offset); msg = (const struct ng_mesg *)((const u_char *)sopt - sizeof(*msg)); return msg->header.arglen - sizeof(*sopt); } /* Parse type for the option value part of a struct ng_ksocket_sockopt XXX Eventually, we should handle the different socket options specially. XXX This would avoid byte order problems, eg an integer value of 1 is XXX going to be "[1]" for little endian or "[3=1]" for big endian. 
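
   In binary form the option value already travels as raw host-order bytes
   after the fixed header, which is exactly the byte-order caveat noted
   above.  A hypothetical setopt request built in C (the node path
   "ksock:" is a placeholder; <netgraph/ng_ksocket.h> supplies the
   structure and cookie):

	u_char buf[sizeof(struct ng_ksocket_sockopt) + sizeof(int32_t)];
	struct ng_ksocket_sockopt *sopt = (struct ng_ksocket_sockopt *)buf;
	int32_t one = 1;

	sopt->level = SOL_SOCKET;
	sopt->name = SO_REUSEPORT;
	memcpy(sopt->value, &one, sizeof(one));
	NgSendMsg(cs, "ksock:", NGM_KSOCKET_COOKIE, NGM_KSOCKET_SETOPT,
	    buf, sizeof(buf));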
*/ static const struct ng_parse_type ng_ksocket_sockoptval_type = { &ng_parse_bytearray_type, &ng_parse_sockoptval_getLength }; /* Parse type for struct ng_ksocket_sockopt */ static const struct ng_parse_struct_field ng_ksocket_sockopt_type_fields[] = NG_KSOCKET_SOCKOPT_INFO(&ng_ksocket_sockoptval_type); static const struct ng_parse_type ng_ksocket_sockopt_type = { &ng_parse_struct_type, &ng_ksocket_sockopt_type_fields }; /* Parse type for struct ng_ksocket_accept */ static const struct ng_parse_struct_field ng_ksocket_accept_type_fields[] = NGM_KSOCKET_ACCEPT_INFO; static const struct ng_parse_type ng_ksocket_accept_type = { &ng_parse_struct_type, &ng_ksocket_accept_type_fields }; /* List of commands and how to convert arguments to/from ASCII */ static const struct ng_cmdlist ng_ksocket_cmds[] = { { NGM_KSOCKET_COOKIE, NGM_KSOCKET_BIND, "bind", &ng_ksocket_sockaddr_type, NULL }, { NGM_KSOCKET_COOKIE, NGM_KSOCKET_LISTEN, "listen", &ng_parse_int32_type, NULL }, { NGM_KSOCKET_COOKIE, NGM_KSOCKET_ACCEPT, "accept", NULL, &ng_ksocket_accept_type }, { NGM_KSOCKET_COOKIE, NGM_KSOCKET_CONNECT, "connect", &ng_ksocket_sockaddr_type, &ng_parse_int32_type }, { NGM_KSOCKET_COOKIE, NGM_KSOCKET_GETNAME, "getname", NULL, &ng_ksocket_sockaddr_type }, { NGM_KSOCKET_COOKIE, NGM_KSOCKET_GETPEERNAME, "getpeername", NULL, &ng_ksocket_sockaddr_type }, { NGM_KSOCKET_COOKIE, NGM_KSOCKET_SETOPT, "setopt", &ng_ksocket_sockopt_type, NULL }, { NGM_KSOCKET_COOKIE, NGM_KSOCKET_GETOPT, "getopt", &ng_ksocket_sockopt_type, &ng_ksocket_sockopt_type }, { 0 } }; /* Node type descriptor */ static struct ng_type ng_ksocket_typestruct = { .version = NG_ABI_VERSION, .name = NG_KSOCKET_NODE_TYPE, .constructor = ng_ksocket_constructor, .rcvmsg = ng_ksocket_rcvmsg, .shutdown = ng_ksocket_shutdown, .newhook = ng_ksocket_newhook, .connect = ng_ksocket_connect, .rcvdata = ng_ksocket_rcvdata, .disconnect = ng_ksocket_disconnect, .cmdlist = ng_ksocket_cmds, }; NETGRAPH_INIT(ksocket, &ng_ksocket_typestruct); #define ERROUT(x) do { error = (x); goto done; } while (0) /************************************************************************ NETGRAPH NODE STUFF ************************************************************************/ /* * Node type constructor * The NODE part is assumed to be all set up. * There is already a reference to the node for us. */ static int ng_ksocket_constructor(node_p node) { priv_p priv; /* Allocate private structure */ priv = malloc(sizeof(*priv), M_NETGRAPH_KSOCKET, M_NOWAIT | M_ZERO); if (priv == NULL) return (ENOMEM); LIST_INIT(&priv->embryos); /* cross link them */ priv->node = node; NG_NODE_SET_PRIVATE(node, priv); /* Done */ return (0); } /* * Give our OK for a hook to be added. The hook name is of the * form "//" where the three components may * be decimal numbers or else aliases from the above lists. * * Connecting a hook amounts to opening the socket. Disconnecting * the hook closes the socket and destroys the node as well. 
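 *
 * Concretely, a hypothetical controller program could open a kernel UDP
 * socket by naming the hook after the triple, e.g. (node and hook names
 * are placeholders, error handling omitted):
 *
 *	#include <netgraph.h>
 *
 *	struct ngm_mkpeer mkp;
 *	int cs, ds;
 *
 *	NgMkSockNode("ctl", &cs, &ds);
 *	strlcpy(mkp.type, "ksocket", sizeof(mkp.type));
 *	strlcpy(mkp.ourhook, "udp", sizeof(mkp.ourhook));
 *	strlcpy(mkp.peerhook, "inet/dgram/udp", sizeof(mkp.peerhook));
 *	NgSendMsg(cs, ".:", NGM_GENERIC_COOKIE, NGM_MKPEER,
 *	    &mkp, sizeof(mkp));
 *
 * The peer hook name "inet/dgram/udp" is exactly the family/type/protocol
 * triple parsed below.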
*/ static int ng_ksocket_newhook(node_p node, hook_p hook, const char *name0) { struct thread *td = curthread; /* XXX broken */ const priv_p priv = NG_NODE_PRIVATE(node); char *s1, *s2, name[NG_HOOKSIZ]; int family, type, protocol, error; /* Check if we're already connected */ if (priv->hook != NULL) return (EISCONN); if (priv->flags & KSF_CLONED) { if (priv->flags & KSF_EMBRYONIC) { /* Remove ourselves from our parent's embryo list */ LIST_REMOVE(priv, siblings); priv->flags &= ~KSF_EMBRYONIC; } } else { /* Extract family, type, and protocol from hook name */ snprintf(name, sizeof(name), "%s", name0); s1 = name; if ((s2 = strchr(s1, '/')) == NULL) return (EINVAL); *s2++ = '\0'; family = ng_ksocket_parse(ng_ksocket_families, s1, 0); if (family == -1) return (EINVAL); s1 = s2; if ((s2 = strchr(s1, '/')) == NULL) return (EINVAL); *s2++ = '\0'; type = ng_ksocket_parse(ng_ksocket_types, s1, 0); if (type == -1) return (EINVAL); s1 = s2; protocol = ng_ksocket_parse(ng_ksocket_protos, s1, family); if (protocol == -1) return (EINVAL); /* Create the socket */ error = socreate(family, &priv->so, type, protocol, td->td_ucred, td); if (error != 0) return (error); /* XXX call soreserve() ? */ } /* OK */ priv->hook = hook; /* * In case of misconfigured routing a packet may reenter * ksocket node recursively. Decouple stack to avoid possible * panics about sleeping with locks held. */ NG_HOOK_FORCE_QUEUE(hook); return(0); } static int ng_ksocket_connect(hook_p hook) { node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); struct socket *const so = priv->so; /* Add our hook for incoming data and other events */ SOCKBUF_LOCK(&priv->so->so_rcv); soupcall_set(priv->so, SO_RCV, ng_ksocket_incoming, node); SOCKBUF_UNLOCK(&priv->so->so_rcv); SOCKBUF_LOCK(&priv->so->so_snd); soupcall_set(priv->so, SO_SND, ng_ksocket_incoming, node); SOCKBUF_UNLOCK(&priv->so->so_snd); SOCK_LOCK(priv->so); priv->so->so_state |= SS_NBIO; SOCK_UNLOCK(priv->so); /* * --Original comment-- * On a cloned socket we may have already received one or more * upcalls which we couldn't handle without a hook. Handle * those now. * We cannot call the upcall function directly * from here, because until this function has returned our * hook isn't connected. * * ---meta comment for -current --- * XXX This is dubius. * Upcalls between the time that the hook was * first created and now (on another processesor) will * be earlier on the queue than the request to finalise the hook. * By the time the hook is finalised, * The queued upcalls will have happened and the code * will have discarded them because of a lack of a hook. * (socket not open). * * This is a bad byproduct of the complicated way in which hooks * are now created (3 daisy chained async events). * * Since we are a netgraph operation * We know that we hold a lock on this node. This forces the * request we make below to be queued rather than implemented * immediately which will cause the upcall function to be called a bit * later. * However, as we will run any waiting queued operations immediately * after doing this one, if we have not finalised the other end * of the hook, those queued operations will fail. 
*/ if (priv->flags & KSF_CLONED) { ng_send_fn(node, NULL, &ng_ksocket_incoming2, so, M_NOWAIT); } return (0); } /* * Receive a control message */ static int ng_ksocket_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct thread *td = curthread; /* XXX broken */ const priv_p priv = NG_NODE_PRIVATE(node); struct socket *const so = priv->so; struct ng_mesg *resp = NULL; int error = 0; struct ng_mesg *msg; NGI_GET_MSG(item, msg); switch (msg->header.typecookie) { case NGM_KSOCKET_COOKIE: switch (msg->header.cmd) { case NGM_KSOCKET_BIND: { struct sockaddr *const sa = (struct sockaddr *)msg->data; /* Sanity check */ if (msg->header.arglen < SADATA_OFFSET || msg->header.arglen < sa->sa_len) ERROUT(EINVAL); if (so == NULL) ERROUT(ENXIO); /* Bind */ error = sobind(so, sa, td); break; } case NGM_KSOCKET_LISTEN: { /* Sanity check */ if (msg->header.arglen != sizeof(int32_t)) ERROUT(EINVAL); if (so == NULL) ERROUT(ENXIO); /* Listen */ so->so_state |= SS_NBIO; error = solisten(so, *((int32_t *)msg->data), td); if (error == 0) { SOLISTEN_LOCK(so); solisten_upcall_set(so, ng_ksocket_listen_upcall, priv); SOLISTEN_UNLOCK(so); } break; } case NGM_KSOCKET_ACCEPT: { /* Sanity check */ if (msg->header.arglen != 0) ERROUT(EINVAL); if (so == NULL) ERROUT(ENXIO); /* Make sure the socket is capable of accepting */ if (!(so->so_options & SO_ACCEPTCONN)) ERROUT(EINVAL); if (priv->flags & KSF_ACCEPTING) ERROUT(EALREADY); /* * If a connection is already complete, take it. * Otherwise let the upcall function deal with * the connection when it comes in. Don't return * EWOULDBLOCK, per ng_ksocket(4) documentation. */ error = ng_ksocket_accept(priv); if (error == EWOULDBLOCK) error = 0; if (error != 0) ERROUT(error); priv->response_token = msg->header.token; priv->response_addr = NGI_RETADDR(item); break; } case NGM_KSOCKET_CONNECT: { struct sockaddr *const sa = (struct sockaddr *)msg->data; /* Sanity check */ if (msg->header.arglen < SADATA_OFFSET || msg->header.arglen < sa->sa_len) ERROUT(EINVAL); if (so == NULL) ERROUT(ENXIO); /* Do connect */ if ((so->so_state & SS_ISCONNECTING) != 0) ERROUT(EALREADY); if ((error = soconnect(so, sa, td)) != 0) { so->so_state &= ~SS_ISCONNECTING; ERROUT(error); } if ((so->so_state & SS_ISCONNECTING) != 0) { /* We will notify the sender when we connect */ priv->response_token = msg->header.token; priv->response_addr = NGI_RETADDR(item); priv->flags |= KSF_CONNECTING; ERROUT(EINPROGRESS); } break; } case NGM_KSOCKET_GETNAME: case NGM_KSOCKET_GETPEERNAME: { - int (*func)(struct socket *so, struct sockaddr **nam); - struct sockaddr *sa = NULL; - int len; + int (*func)(struct socket *so, struct sockaddr *sa); + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; /* Sanity check */ if (msg->header.arglen != 0) ERROUT(EINVAL); if (so == NULL) ERROUT(ENXIO); /* Get function */ if (msg->header.cmd == NGM_KSOCKET_GETPEERNAME) { if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) ERROUT(ENOTCONN); - func = so->so_proto->pr_peeraddr; + func = sopeeraddr; } else - func = so->so_proto->pr_sockaddr; + func = sosockaddr; /* Get local or peer address */ - if ((error = (*func)(so, &sa)) != 0) - goto bail; - len = (sa == NULL) ? 
0 : sa->sa_len; + error = (*func)(so, (struct sockaddr *)&ss); + if (error) + break; /* Send it back in a response */ - NG_MKRESPONSE(resp, msg, len, M_NOWAIT); - if (resp == NULL) { + NG_MKRESPONSE(resp, msg, ss.ss_len, M_NOWAIT); + if (resp != NULL) + bcopy(&ss, resp->data, ss.ss_len); + else error = ENOMEM; - goto bail; - } - bcopy(sa, resp->data, len); - bail: - /* Cleanup */ - if (sa != NULL) - free(sa, M_SONAME); break; } case NGM_KSOCKET_GETOPT: { struct ng_ksocket_sockopt *ksopt = (struct ng_ksocket_sockopt *)msg->data; struct sockopt sopt; /* Sanity check */ if (msg->header.arglen != sizeof(*ksopt)) ERROUT(EINVAL); if (so == NULL) ERROUT(ENXIO); /* Get response with room for option value */ NG_MKRESPONSE(resp, msg, sizeof(*ksopt) + NG_KSOCKET_MAX_OPTLEN, M_NOWAIT); if (resp == NULL) ERROUT(ENOMEM); /* Get socket option, and put value in the response */ sopt.sopt_dir = SOPT_GET; sopt.sopt_level = ksopt->level; sopt.sopt_name = ksopt->name; sopt.sopt_td = NULL; sopt.sopt_valsize = NG_KSOCKET_MAX_OPTLEN; ksopt = (struct ng_ksocket_sockopt *)resp->data; sopt.sopt_val = ksopt->value; if ((error = sogetopt(so, &sopt)) != 0) { NG_FREE_MSG(resp); break; } /* Set actual value length */ resp->header.arglen = sizeof(*ksopt) + sopt.sopt_valsize; break; } case NGM_KSOCKET_SETOPT: { struct ng_ksocket_sockopt *const ksopt = (struct ng_ksocket_sockopt *)msg->data; const int valsize = msg->header.arglen - sizeof(*ksopt); struct sockopt sopt; /* Sanity check */ if (valsize < 0) ERROUT(EINVAL); if (so == NULL) ERROUT(ENXIO); /* Set socket option */ sopt.sopt_dir = SOPT_SET; sopt.sopt_level = ksopt->level; sopt.sopt_name = ksopt->name; sopt.sopt_val = ksopt->value; sopt.sopt_valsize = valsize; sopt.sopt_td = NULL; error = sosetopt(so, &sopt); break; } default: error = EINVAL; break; } break; default: error = EINVAL; break; } done: NG_RESPOND_MSG(error, node, item, resp); NG_FREE_MSG(msg); return (error); } /* * Receive incoming data on our hook. Send it out the socket. */ static int ng_ksocket_rcvdata(hook_p hook, item_p item) { struct thread *td = curthread; /* XXX broken */ const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); struct socket *const so = priv->so; struct sockaddr *sa = NULL; int error; struct mbuf *m; #ifdef ALIGNED_POINTER struct mbuf *n; #endif /* ALIGNED_POINTER */ struct sa_tag *stag; /* Extract data */ NGI_GET_M(item, m); NG_FREE_ITEM(item); #ifdef ALIGNED_POINTER if (!ALIGNED_POINTER(mtod(m, caddr_t), uint32_t)) { n = m_defrag(m, M_NOWAIT); if (n == NULL) { m_freem(m); return (ENOBUFS); } m = n; } #endif /* ALIGNED_POINTER */ /* * Look if socket address is stored in packet tags. * If sockaddr is ours, or provided by a third party (zero id), * then we accept it. */ if (((stag = (struct sa_tag *)m_tag_locate(m, NGM_KSOCKET_COOKIE, NG_KSOCKET_TAG_SOCKADDR, NULL)) != NULL) && (stag->id == NG_NODE_ID(node) || stag->id == 0)) sa = &stag->sa; /* Reset specific mbuf flags to prevent addressing problems. 
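
   (Aside on the NGM_KSOCKET_GETNAME / GETPEERNAME conversion above: the
   newer in-kernel convention has the caller provide the buffer, so a
   minimal sketch of a kernel-side caller would be

	struct sockaddr_storage ss = { .ss_len = sizeof(ss) };
	int error;

	error = sopeeraddr(so, (struct sockaddr *)&ss);

   and on success the first ss.ss_len bytes of ss hold the peer address;
   sosockaddr() works the same way for the local address.)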
*/ m->m_flags &= ~(M_BCAST|M_MCAST); /* Send packet */ error = sosend(so, sa, 0, m, 0, 0, td); return (error); } /* * Destroy node */ static int ng_ksocket_shutdown(node_p node) { const priv_p priv = NG_NODE_PRIVATE(node); struct socket *so = priv->so; priv_p embryo; /* Close our socket (if any) */ if (priv->so != NULL) { if (SOLISTENING(so)) { SOLISTEN_LOCK(so); solisten_upcall_set(so, NULL, NULL); SOLISTEN_UNLOCK(so); } else { SOCK_RECVBUF_LOCK(so); soupcall_clear(so, SO_RCV); SOCK_RECVBUF_UNLOCK(so); SOCK_SENDBUF_LOCK(so); soupcall_clear(so, SO_SND); SOCK_SENDBUF_UNLOCK(so); } soclose(so); priv->so = NULL; } /* If we are an embryo, take ourselves out of the parent's list */ if (priv->flags & KSF_EMBRYONIC) { LIST_REMOVE(priv, siblings); priv->flags &= ~KSF_EMBRYONIC; } /* Remove any embryonic children we have */ while (!LIST_EMPTY(&priv->embryos)) { embryo = LIST_FIRST(&priv->embryos); ng_rmnode_self(embryo->node); } /* Take down netgraph node */ bzero(priv, sizeof(*priv)); free(priv, M_NETGRAPH_KSOCKET); NG_NODE_SET_PRIVATE(node, NULL); NG_NODE_UNREF(node); /* let the node escape */ return (0); } /* * Hook disconnection */ static int ng_ksocket_disconnect(hook_p hook) { KASSERT(NG_NODE_NUMHOOKS(NG_HOOK_NODE(hook)) == 0, ("%s: numhooks=%d?", __func__, NG_NODE_NUMHOOKS(NG_HOOK_NODE(hook)))); if (NG_NODE_IS_VALID(NG_HOOK_NODE(hook))) ng_rmnode_self(NG_HOOK_NODE(hook)); return (0); } /************************************************************************ HELPER STUFF ************************************************************************/ /* * You should not "just call" a netgraph node function from an external * asynchronous event. This is because in doing so you are ignoring the * locking on the netgraph nodes. Instead call your function via ng_send_fn(). * This will call the function you chose, but will first do all the * locking rigmarole. Your function MAY only be called at some distant future * time (several millisecs away) so don't give it any arguments * that may be revoked soon (e.g. on your stack). * * To decouple the stack, we use the queue version of ng_send_fn(). */ static int ng_ksocket_incoming(struct socket *so, void *arg, int waitflag) { const node_p node = arg; const priv_p priv = NG_NODE_PRIVATE(node); int wait = ((waitflag & M_WAITOK) ? NG_WAITOK : 0) | NG_QUEUE; /* * Even if the node is not locked, as soon as we are called, we assume * it exists and its private area is valid. With some care we can * access it. Mark the node to record that an incoming event was sent for it, to * avoid unneeded queue thrashing. */ if (atomic_cmpset_int(&priv->fn_sent, 0, 1) && ng_send_fn1(node, NULL, &ng_ksocket_incoming2, so, 0, wait)) { atomic_store_rel_int(&priv->fn_sent, 0); } return (SU_OK); } /* * When incoming data is appended to the socket, we get notified here. * This is also called whenever a significant event occurs for the socket. * Our original caller may have queued this event some time ago and * we cannot trust that it even still exists. The node however is being * held with a reference by the queueing code and guaranteed to be valid. */ static void ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2) { struct socket *so = arg1; const priv_p priv = NG_NODE_PRIVATE(node); struct ng_mesg *response; int error; KASSERT(so == priv->so, ("%s: wrong socket", __func__)); /* Allow next incoming event to be queued. 
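
   The pattern described in the HELPER STUFF comment above reduces to a
   deferred call, e.g. (my_handler and arg are placeholders for a caller's
   own function and argument):

	static void
	my_handler(node_p node, hook_p hook, void *arg1, int arg2)
	{
		printf("deferred call, arg2=%d\n", arg2);
	}

	called from the asynchronous upcall or interrupt context:
		ng_send_fn(node, NULL, &my_handler, arg, 0);

   ng_send_fn1() used below is the variant that also takes queueing flags
   such as NG_QUEUE.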
*/ atomic_store_rel_int(&priv->fn_sent, 0); /* Check whether a pending connect operation has completed */ if (priv->flags & KSF_CONNECTING) { if ((error = so->so_error) != 0) { so->so_error = 0; so->so_state &= ~SS_ISCONNECTING; } if (!(so->so_state & SS_ISCONNECTING)) { NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE, NGM_KSOCKET_CONNECT, sizeof(int32_t), M_NOWAIT); if (response != NULL) { response->header.flags |= NGF_RESP; response->header.token = priv->response_token; *(int32_t *)response->data = error; /* * send an async "response" message * to the node that set us up * (if it still exists) */ NG_SEND_MSG_ID(error, node, response, priv->response_addr, 0); } priv->flags &= ~KSF_CONNECTING; } } /* * If we don't have a hook, we must handle data events later. When * the hook gets created and is connected, this upcall function * will be called again. */ if (priv->hook == NULL) return; /* Read and forward available mbufs. */ while (1) { struct uio uio; struct sockaddr *sa; struct mbuf *m; int flags; /* Try to get next packet from socket. */ uio.uio_td = NULL; uio.uio_resid = IP_MAXPACKET; flags = MSG_DONTWAIT; sa = NULL; if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ? NULL : &sa, &uio, &m, NULL, &flags)) != 0) break; /* See if we got anything. */ if (flags & MSG_TRUNC) { m_freem(m); m = NULL; } if (m == NULL) { if (sa != NULL) free(sa, M_SONAME); break; } KASSERT(m->m_nextpkt == NULL, ("%s: nextpkt", __func__)); /* * Stream sockets do not have packet boundaries, so * we have to allocate a header mbuf and attach the * stream of data to it. */ if (so->so_type == SOCK_STREAM) { struct mbuf *mh; mh = m_gethdr(M_NOWAIT, MT_DATA); if (mh == NULL) { m_freem(m); if (sa != NULL) free(sa, M_SONAME); break; } mh->m_next = m; for (; m; m = m->m_next) mh->m_pkthdr.len += m->m_len; m = mh; } /* Put peer's socket address (if any) into a tag */ if (sa != NULL) { struct sa_tag *stag; stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE, NG_KSOCKET_TAG_SOCKADDR, sizeof(ng_ID_t) + sa->sa_len, M_NOWAIT); if (stag == NULL) { free(sa, M_SONAME); goto sendit; } bcopy(sa, &stag->sa, sa->sa_len); free(sa, M_SONAME); stag->id = NG_NODE_ID(node); m_tag_prepend(m, &stag->tag); } sendit: /* Forward data with optional peer sockaddr as packet tag */ NG_SEND_DATA_ONLY(error, priv->hook, m); } /* * If the peer has closed the connection, forward a 0-length mbuf * to indicate end-of-file. 
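 *
 * A downstream node that needs the datagram's origin can recover it from
 * the tag attached above, along the lines of (sketch only; peer is a
 * caller-supplied struct sockaddr_storage):
 *
 *	struct sa_tag *stag;
 *
 *	stag = (struct sa_tag *)m_tag_locate(m, NGM_KSOCKET_COOKIE,
 *	    NG_KSOCKET_TAG_SOCKADDR, NULL);
 *	if (stag != NULL)
 *		bcopy(&stag->sa, &peer, stag->sa.sa_len);
 *
 * Conversely, a peer may attach such a tag with id 0 to make the rcvdata
 * routine above send to a specific destination.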
*/ if (so->so_rcv.sb_state & SBS_CANTRCVMORE && !(priv->flags & KSF_EOFSEEN)) { struct mbuf *m; m = m_gethdr(M_NOWAIT, MT_DATA); if (m != NULL) NG_SEND_DATA_ONLY(error, priv->hook, m); priv->flags |= KSF_EOFSEEN; } } static int ng_ksocket_accept(priv_p priv) { struct socket *const head = priv->so; struct socket *so; struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; struct ng_mesg *resp; struct ng_ksocket_accept *resp_data; node_p node; priv_p priv2; int len; int error; SOLISTEN_LOCK(head); error = solisten_dequeue(head, &so, SOCK_NONBLOCK); if (error == EWOULDBLOCK) { priv->flags |= KSF_ACCEPTING; return (error); } priv->flags &= ~KSF_ACCEPTING; if (error) return (error); if ((error = soaccept(so, (struct sockaddr *)&ss)) != 0) return (error); len = OFFSETOF(struct ng_ksocket_accept, addr); len += ss.ss_len; NG_MKMESSAGE(resp, NGM_KSOCKET_COOKIE, NGM_KSOCKET_ACCEPT, len, M_NOWAIT); if (resp == NULL) { soclose(so); goto out; } resp->header.flags |= NGF_RESP; resp->header.token = priv->response_token; /* Clone a ksocket node to wrap the new socket */ error = ng_make_node_common(&ng_ksocket_typestruct, &node); if (error) { free(resp, M_NETGRAPH); soclose(so); goto out; } if (ng_ksocket_constructor(node) != 0) { NG_NODE_UNREF(node); free(resp, M_NETGRAPH); soclose(so); goto out; } priv2 = NG_NODE_PRIVATE(node); priv2->so = so; priv2->flags |= KSF_CLONED | KSF_EMBRYONIC; /* * Insert the cloned node into a list of embryonic children * on the parent node. When a hook is created on the cloned * node it will be removed from this list. When the parent * is destroyed it will destroy any embryonic children it has. */ LIST_INSERT_HEAD(&priv->embryos, priv2, siblings); SOCKBUF_LOCK(&so->so_rcv); soupcall_set(so, SO_RCV, ng_ksocket_incoming, node); SOCKBUF_UNLOCK(&so->so_rcv); SOCKBUF_LOCK(&so->so_snd); soupcall_set(so, SO_SND, ng_ksocket_incoming, node); SOCKBUF_UNLOCK(&so->so_snd); /* Fill in the response data and send it or return it to the caller */ resp_data = (struct ng_ksocket_accept *)resp->data; resp_data->nodeid = NG_NODE_ID(node); bcopy(&ss, &resp_data->addr, ss.ss_len); NG_SEND_MSG_ID(error, node, resp, priv->response_addr, 0); out: return (0); } static int ng_ksocket_listen_upcall(struct socket *so, void *arg, int waitflag) { priv_p priv = arg; int wait = ((waitflag & M_WAITOK) ? NG_WAITOK : 0) | NG_QUEUE; ng_send_fn1(priv->node, NULL, &ng_ksocket_listen_upcall2, priv, 0, wait); return (SU_OK); } static void ng_ksocket_listen_upcall2(node_p node, hook_p hook, void *arg1, int arg2) { const priv_p priv = NG_NODE_PRIVATE(node); (void )ng_ksocket_accept(priv); } /* * Parse out either an integer value or an alias. */ static int ng_ksocket_parse(const struct ng_ksocket_alias *aliases, const char *s, int family) { int k, val; char *eptr; /* Try aliases */ for (k = 0; aliases[k].name != NULL; k++) { if (strcmp(s, aliases[k].name) == 0 && aliases[k].family == family) return aliases[k].value; } /* Try parsing as a number */ val = (int)strtoul(s, &eptr, 10); if (val < 0 || *eptr != '\0') return (-1); return (val); } diff --git a/sys/netgraph/ng_socket.c b/sys/netgraph/ng_socket.c index 8a8f127ee39b..c356f7d6aa12 100644 --- a/sys/netgraph/ng_socket.c +++ b/sys/netgraph/ng_socket.c @@ -1,1209 +1,1202 @@ /* * ng_socket.c */ /*- * Copyright (c) 1996-1999 Whistle Communications, Inc. * All rights reserved. 
* * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; * provided, however, that: * 1. Any and all reproductions of the source or object code must include the * copyright notice above and the following disclaimer of warranties; and * 2. No rights are granted, in any manner or form, to use Whistle * Communications, Inc. trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. * * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Julian Elischer * $Whistle: ng_socket.c,v 1.28 1999/11/01 09:24:52 julian Exp $ */ /* * Netgraph socket nodes * * There are two types of netgraph sockets, control and data. * Control sockets have a netgraph node, but data sockets are * parasitic on control sockets, and have no node of their own. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef NG_SEPARATE_MALLOC static MALLOC_DEFINE(M_NETGRAPH_PATH, "netgraph_path", "netgraph path info"); static MALLOC_DEFINE(M_NETGRAPH_SOCK, "netgraph_sock", "netgraph socket info"); #else #define M_NETGRAPH_PATH M_NETGRAPH #define M_NETGRAPH_SOCK M_NETGRAPH #endif /* * It's Ascii-art time! 
* +-------------+ +-------------+ * |socket (ctl)| |socket (data)| * +-------------+ +-------------+ * ^ ^ * | | * v v * +-----------+ +-----------+ * |pcb (ctl)| |pcb (data)| * +-----------+ +-----------+ * ^ ^ * | | * v v * +--------------------------+ * | Socket type private | * | data | * +--------------------------+ * ^ * | * v * +----------------+ * | struct ng_node | * +----------------+ */ /* Netgraph node methods */ static ng_constructor_t ngs_constructor; static ng_rcvmsg_t ngs_rcvmsg; static ng_shutdown_t ngs_shutdown; static ng_newhook_t ngs_newhook; static ng_connect_t ngs_connect; static ng_findhook_t ngs_findhook; static ng_rcvdata_t ngs_rcvdata; static ng_disconnect_t ngs_disconnect; /* Internal methods */ static int ng_attach_data(struct socket *so); static int ng_attach_cntl(struct socket *so); static int ng_attach_common(struct socket *so, int type); static void ng_detach_common(struct ngpcb *pcbp, int type); static void ng_socket_free_priv(struct ngsock *priv); static int ng_connect_data(struct sockaddr *nam, struct ngpcb *pcbp); static int ng_bind(struct sockaddr *nam, struct ngpcb *pcbp); static int ngs_mod_event(module_t mod, int event, void *data); static void ng_socket_item_applied(void *context, int error); /* Netgraph type descriptor */ static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_SOCKET_NODE_TYPE, .mod_event = ngs_mod_event, .constructor = ngs_constructor, .rcvmsg = ngs_rcvmsg, .shutdown = ngs_shutdown, .newhook = ngs_newhook, .connect = ngs_connect, .findhook = ngs_findhook, .rcvdata = ngs_rcvdata, .disconnect = ngs_disconnect, }; NETGRAPH_INIT_ORDERED(socket, &typestruct, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); /* Buffer space */ static u_long ngpdg_sendspace = 20 * 1024; /* really max datagram size */ SYSCTL_ULONG(_net_graph, OID_AUTO, maxdgram, CTLFLAG_RW, &ngpdg_sendspace , 0, "Maximum outgoing Netgraph datagram size"); static u_long ngpdg_recvspace = 20 * 1024; SYSCTL_ULONG(_net_graph, OID_AUTO, recvspace, CTLFLAG_RW, &ngpdg_recvspace , 0, "Maximum space for incoming Netgraph datagrams"); /* List of all sockets (for netstat -f netgraph) */ static LIST_HEAD(, ngpcb) ngsocklist; static struct mtx ngsocketlist_mtx; #define sotongpcb(so) ((struct ngpcb *)(so)->so_pcb) /* If getting unexplained errors returned, set this to "kdb_enter("X"); */ #ifndef TRAP_ERROR #define TRAP_ERROR #endif struct hookpriv { LIST_ENTRY(hookpriv) next; hook_p hook; }; LIST_HEAD(ngshash, hookpriv); /* Per-node private data */ struct ngsock { struct ng_node *node; /* the associated netgraph node */ struct ngpcb *datasock; /* optional data socket */ struct ngpcb *ctlsock; /* optional control socket */ struct ngshash *hash; /* hash for hook names */ u_long hmask; /* hash mask */ int flags; int refs; struct mtx mtx; /* mtx to wait on */ int error; /* place to store error */ }; #define NGS_FLAG_NOLINGER 1 /* close with last hook */ /*************************************************************** Control sockets ***************************************************************/ static int ngc_attach(struct socket *so, int proto, struct thread *td) { struct ngpcb *const pcbp = sotongpcb(so); int error; error = priv_check(td, PRIV_NETGRAPH_CONTROL); if (error) return (error); if (pcbp != NULL) return (EISCONN); return (ng_attach_cntl(so)); } static void ngc_detach(struct socket *so) { struct ngpcb *const pcbp = sotongpcb(so); KASSERT(pcbp != NULL, ("ngc_detach: pcbp == NULL")); ng_detach_common(pcbp, NG_CONTROL); } static int ngc_send(struct socket *so, int flags, 
struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *td) { struct ngpcb *const pcbp = sotongpcb(so); struct ngsock *const priv = NG_NODE_PRIVATE(pcbp->sockdata->node); struct sockaddr_ng *const sap = (struct sockaddr_ng *) addr; struct ng_mesg *msg; struct mbuf *m0; item_p item; char *path = NULL; int len, error = 0; struct ng_apply_info apply; if (control) { error = EINVAL; goto release; } /* Require destination as there may be >= 1 hooks on this node. */ if (addr == NULL) { error = EDESTADDRREQ; goto release; } if (sap->sg_len > NG_NODESIZ + offsetof(struct sockaddr_ng, sg_data)) { error = EINVAL; goto release; } /* * Allocate an expendable buffer for the path, chop off * the sockaddr header, and make sure it's NUL terminated. */ len = sap->sg_len - offsetof(struct sockaddr_ng, sg_data); path = malloc(len + 1, M_NETGRAPH_PATH, M_WAITOK); bcopy(sap->sg_data, path, len); path[len] = '\0'; /* * Move the actual message out of mbufs into a linear buffer. * Start by adding up the size of the data. (could use mh_len?) */ for (len = 0, m0 = m; m0 != NULL; m0 = m0->m_next) len += m0->m_len; /* * Move the data into a linear buffer as well. * Messages are not delivered in mbufs. */ msg = malloc(len + 1, M_NETGRAPH_MSG, M_WAITOK); m_copydata(m, 0, len, (char *)msg); if (msg->header.version != NG_VERSION) { free(msg, M_NETGRAPH_MSG); error = EINVAL; goto release; } /* * Hack alert! * We look into the message and if it mkpeers a node of unknown type, we * try to load it. We need to do this now, in syscall thread, because if * message gets queued and applied later we will get panic. */ if (msg->header.typecookie == NGM_GENERIC_COOKIE && msg->header.cmd == NGM_MKPEER) { struct ngm_mkpeer *const mkp = (struct ngm_mkpeer *) msg->data; if (ng_findtype(mkp->type) == NULL) { char filename[NG_TYPESIZ + 3]; int fileid; /* Not found, try to load it as a loadable module. */ snprintf(filename, sizeof(filename), "ng_%s", mkp->type); error = kern_kldload(curthread, filename, &fileid); if (error != 0) { free(msg, M_NETGRAPH_MSG); goto release; } /* See if type has been loaded successfully. */ if (ng_findtype(mkp->type) == NULL) { free(msg, M_NETGRAPH_MSG); (void)kern_kldunload(curthread, fileid, LINKER_UNLOAD_NORMAL); error = ENXIO; goto release; } } } item = ng_package_msg(msg, NG_WAITOK); if ((error = ng_address_path((pcbp->sockdata->node), item, path, 0)) != 0) { #ifdef TRACE_MESSAGES printf("ng_address_path: errx=%d\n", error); #endif goto release; } #ifdef TRACE_MESSAGES printf("[%x]:<---------[socket]: c=<%d>cmd=%x(%s) f=%x #%d (%s)\n", item->el_dest->nd_ID, msg->header.typecookie, msg->header.cmd, msg->header.cmdstr, msg->header.flags, msg->header.token, item->el_dest->nd_type->name); #endif SAVE_LINE(item); /* * We do not want to return from syscall until the item * is processed by destination node. We register callback * on the item, which will update priv->error when item * was applied. * If ng_snd_item() has queued item, we sleep until * callback wakes us up. 
*/ bzero(&apply, sizeof(apply)); apply.apply = ng_socket_item_applied; apply.context = priv; item->apply = &apply; priv->error = -1; error = ng_snd_item(item, 0); mtx_lock(&priv->mtx); if (priv->error == -1) msleep(priv, &priv->mtx, 0, "ngsock", 0); mtx_unlock(&priv->mtx); KASSERT(priv->error != -1, ("ng_socket: priv->error wasn't updated")); error = priv->error; release: if (path != NULL) free(path, M_NETGRAPH_PATH); if (control != NULL) m_freem(control); if (m != NULL) m_freem(m); return (error); } static int ngc_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { struct ngpcb *const pcbp = sotongpcb(so); if (pcbp == NULL) return (EINVAL); return (ng_bind(nam, pcbp)); } static int ngc_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { /* * At this time refuse to do this.. it used to * do something but it was undocumented and not used. */ printf("program tried to connect control socket to remote node\n"); return (EINVAL); } /*************************************************************** Data sockets ***************************************************************/ static int ngd_attach(struct socket *so, int proto, struct thread *td) { struct ngpcb *const pcbp = sotongpcb(so); if (pcbp != NULL) return (EISCONN); return (ng_attach_data(so)); } static void ngd_detach(struct socket *so) { struct ngpcb *const pcbp = sotongpcb(so); KASSERT(pcbp != NULL, ("ngd_detach: pcbp == NULL")); ng_detach_common(pcbp, NG_DATA); } static int ngd_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *td) { struct epoch_tracker et; struct ngpcb *const pcbp = sotongpcb(so); struct sockaddr_ng *const sap = (struct sockaddr_ng *) addr; int len, error; hook_p hook = NULL; item_p item; char hookname[NG_HOOKSIZ]; if ((pcbp == NULL) || (control != NULL)) { error = EINVAL; goto release; } if (pcbp->sockdata == NULL) { error = ENOTCONN; goto release; } if (sap == NULL) { len = 0; /* Make compiler happy. */ } else { if (sap->sg_len > NG_NODESIZ + offsetof(struct sockaddr_ng, sg_data)) { error = EINVAL; goto release; } len = sap->sg_len - offsetof(struct sockaddr_ng, sg_data); } /* * If the user used any of these ways to not specify an address * then handle specially. */ if ((sap == NULL) || (len <= 0) || (*sap->sg_data == '\0')) { if (NG_NODE_NUMHOOKS(pcbp->sockdata->node) != 1) { error = EDESTADDRREQ; goto release; } /* * If exactly one hook exists, just use it. * Special case to allow write(2) to work on an ng_socket. */ hook = LIST_FIRST(&pcbp->sockdata->node->nd_hooks); } else { if (len >= NG_HOOKSIZ) { error = EINVAL; goto release; } /* * chop off the sockaddr header, and make sure it's NUL * terminated */ bcopy(sap->sg_data, hookname, len); hookname[len] = '\0'; /* Find the correct hook from 'hookname' */ hook = ng_findhook(pcbp->sockdata->node, hookname); if (hook == NULL) { error = EHOSTUNREACH; goto release; } } /* Send data. 
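 * From userland this corresponds to sendto(2) on an NG_DATA socket with a
 * sockaddr_ng naming the destination hook, or to plain write(2) when the
 * node has exactly one hook.  A rough sketch (the descriptor "ds" and the
 * hook name "out" are only examples):
 *
 *	struct sockaddr_ng sg = { .sg_family = AF_NETGRAPH };
 *	strlcpy(sg.sg_data, "out", sizeof(sg.sg_data));
 *	sg.sg_len = offsetof(struct sockaddr_ng, sg_data) + strlen("out") + 1;
 *	sendto(ds, buf, buflen, 0, (struct sockaddr *)&sg, sg.sg_len);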
*/ item = ng_package_data(m, NG_WAITOK); m = NULL; NET_EPOCH_ENTER(et); NG_FWD_ITEM_HOOK(error, item, hook); NET_EPOCH_EXIT(et); release: if (control != NULL) m_freem(control); if (m != NULL) m_freem(m); return (error); } static int ngd_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { struct ngpcb *const pcbp = sotongpcb(so); if (pcbp == NULL) return (EINVAL); return (ng_connect_data(nam, pcbp)); } /* * Used for both data and control sockets */ static int -ng_getsockaddr(struct socket *so, struct sockaddr **addr) +ng_getsockaddr(struct socket *so, struct sockaddr *sa) { + struct sockaddr_ng *sg = (struct sockaddr_ng *)sa; struct ngpcb *pcbp; - struct sockaddr_ng *sg; - int sg_len; int error = 0; pcbp = sotongpcb(so); if ((pcbp == NULL) || (pcbp->sockdata == NULL)) /* XXXGL: can this still happen? */ return (EINVAL); - sg_len = sizeof(struct sockaddr_ng) + NG_NODESIZ - - sizeof(sg->sg_data); - sg = malloc(sg_len, M_SONAME, M_WAITOK | M_ZERO); + *sg = (struct sockaddr_ng ){ + .sg_len = sizeof(struct sockaddr_ng), + .sg_family = AF_NETGRAPH, + }; mtx_lock(&pcbp->sockdata->mtx); if (pcbp->sockdata->node != NULL) { node_p node = pcbp->sockdata->node; if (NG_NODE_HAS_NAME(node)) bcopy(NG_NODE_NAME(node), sg->sg_data, strlen(NG_NODE_NAME(node))); else snprintf(sg->sg_data, sizeof(sg->sg_data), "[%x]", ng_node2ID(node)); - mtx_unlock(&pcbp->sockdata->mtx); - - sg->sg_len = sg_len; - sg->sg_family = AF_NETGRAPH; - *addr = (struct sockaddr *)sg; - } else { - mtx_unlock(&pcbp->sockdata->mtx); - free(sg, M_SONAME); + } else error = EINVAL; - } + mtx_unlock(&pcbp->sockdata->mtx); return (error); } /* * Attach a socket to its protocol-specific partner. * For a control socket, actually create a netgraph node and attach * to it as well. */ static int ng_attach_cntl(struct socket *so) { struct ngsock *priv; struct ngpcb *pcbp; node_p node; int error; /* Setup protocol control block */ if ((error = ng_attach_common(so, NG_CONTROL)) != 0) return (error); pcbp = sotongpcb(so); /* Make the generic node components */ if ((error = ng_make_node_common(&typestruct, &node)) != 0) { ng_detach_common(pcbp, NG_CONTROL); return (error); } /* * Allocate node private info and hash. We start * with 16 hash entries, however we may grow later * in ngs_newhook(). We can't predict how many hooks * this node plans to have. */ priv = malloc(sizeof(*priv), M_NETGRAPH_SOCK, M_WAITOK | M_ZERO); priv->hash = hashinit(16, M_NETGRAPH_SOCK, &priv->hmask); /* Initialize mutex. */ mtx_init(&priv->mtx, "ng_socket", NULL, MTX_DEF); /* Link the pcb to the private data. */ priv->ctlsock = pcbp; pcbp->sockdata = priv; priv->refs++; priv->node = node; pcbp->node_id = node->nd_ID; /* hint for netstat(1) */ /* Link the node and the private data. */ NG_NODE_SET_PRIVATE(priv->node, priv); NG_NODE_REF(priv->node); priv->refs++; return (0); } static int ng_attach_data(struct socket *so) { return (ng_attach_common(so, NG_DATA)); } /* * Set up a socket protocol control block. * This code is shared between control and data sockets. */ static int ng_attach_common(struct socket *so, int type) { struct ngpcb *pcbp; int error; /* Standard socket setup stuff. */ error = soreserve(so, ngpdg_sendspace, ngpdg_recvspace); if (error) return (error); /* Allocate the pcb. */ pcbp = malloc(sizeof(struct ngpcb), M_PCB, M_WAITOK | M_ZERO); pcbp->type = type; /* Link the pcb and the socket. 
*/ so->so_pcb = (caddr_t)pcbp; pcbp->ng_socket = so; /* Add the socket to linked list */ mtx_lock(&ngsocketlist_mtx); LIST_INSERT_HEAD(&ngsocklist, pcbp, socks); mtx_unlock(&ngsocketlist_mtx); return (0); } /* * Disassociate the socket from it's protocol specific * partner. If it's attached to a node's private data structure, * then unlink from that too. If we were the last socket attached to it, * then shut down the entire node. Shared code for control and data sockets. */ static void ng_detach_common(struct ngpcb *pcbp, int which) { struct ngsock *priv = pcbp->sockdata; if (priv != NULL) { mtx_lock(&priv->mtx); switch (which) { case NG_CONTROL: priv->ctlsock = NULL; break; case NG_DATA: priv->datasock = NULL; break; default: panic("%s", __func__); } pcbp->sockdata = NULL; pcbp->node_id = 0; ng_socket_free_priv(priv); } pcbp->ng_socket->so_pcb = NULL; mtx_lock(&ngsocketlist_mtx); LIST_REMOVE(pcbp, socks); mtx_unlock(&ngsocketlist_mtx); free(pcbp, M_PCB); } /* * Remove a reference from node private data. */ static void ng_socket_free_priv(struct ngsock *priv) { mtx_assert(&priv->mtx, MA_OWNED); priv->refs--; if (priv->refs == 0) { mtx_destroy(&priv->mtx); hashdestroy(priv->hash, M_NETGRAPH_SOCK, priv->hmask); free(priv, M_NETGRAPH_SOCK); return; } if ((priv->refs == 1) && (priv->node != NULL)) { node_p node = priv->node; priv->node = NULL; mtx_unlock(&priv->mtx); NG_NODE_UNREF(node); ng_rmnode_self(node); } else mtx_unlock(&priv->mtx); } /* * Connect the data socket to a named control socket node. */ static int ng_connect_data(struct sockaddr *nam, struct ngpcb *pcbp) { struct sockaddr_ng *sap; node_p farnode; struct ngsock *priv; int error; item_p item; /* If we are already connected, don't do it again. */ if (pcbp->sockdata != NULL) return (EISCONN); /* * Find the target (victim) and check it doesn't already have * a data socket. Also check it is a 'socket' type node. * Use ng_package_data() and ng_address_path() to do this. */ sap = (struct sockaddr_ng *) nam; /* The item will hold the node reference. */ item = ng_package_data(NULL, NG_WAITOK); if ((error = ng_address_path(NULL, item, sap->sg_data, 0))) return (error); /* item is freed on failure */ /* * Extract node from item and free item. Remember we now have * a reference on the node. The item holds it for us. * when we free the item we release the reference. */ farnode = item->el_dest; /* shortcut */ if (strcmp(farnode->nd_type->name, NG_SOCKET_NODE_TYPE) != 0) { NG_FREE_ITEM(item); /* drop the reference to the node */ return (EINVAL); } priv = NG_NODE_PRIVATE(farnode); if (priv->datasock != NULL) { NG_FREE_ITEM(item); /* drop the reference to the node */ return (EADDRINUSE); } /* * Link the PCB and the private data struct. and note the extra * reference. Drop the extra reference on the node. 
*/ mtx_lock(&priv->mtx); priv->datasock = pcbp; pcbp->sockdata = priv; pcbp->node_id = priv->node->nd_ID; /* hint for netstat(1) */ priv->refs++; mtx_unlock(&priv->mtx); NG_FREE_ITEM(item); /* drop the reference to the node */ return (0); } /* * Binding a socket means giving the corresponding node a name */ static int ng_bind(struct sockaddr *nam, struct ngpcb *pcbp) { struct ngsock *const priv = pcbp->sockdata; struct sockaddr_ng *const sap = (struct sockaddr_ng *) nam; if (priv == NULL) { TRAP_ERROR; return (EINVAL); } if ((sap->sg_len < 4) || (sap->sg_len > (NG_NODESIZ + 2)) || (sap->sg_data[0] == '\0') || (sap->sg_data[sap->sg_len - 3] != '\0')) { TRAP_ERROR; return (EINVAL); } return (ng_name_node(priv->node, sap->sg_data)); } /*************************************************************** Netgraph node ***************************************************************/ /* * You can only create new nodes from the socket end of things. */ static int ngs_constructor(node_p nodep) { return (EINVAL); } static void ngs_rehash(node_p node) { struct ngsock *priv = NG_NODE_PRIVATE(node); struct ngshash *new; struct hookpriv *hp; hook_p hook; uint32_t h; u_long hmask; new = hashinit_flags((priv->hmask + 1) * 2, M_NETGRAPH_SOCK, &hmask, HASH_NOWAIT); if (new == NULL) return; LIST_FOREACH(hook, &node->nd_hooks, hk_hooks) { hp = NG_HOOK_PRIVATE(hook); #ifdef INVARIANTS LIST_REMOVE(hp, next); #endif h = hash32_str(NG_HOOK_NAME(hook), HASHINIT) & hmask; LIST_INSERT_HEAD(&new[h], hp, next); } hashdestroy(priv->hash, M_NETGRAPH_SOCK, priv->hmask); priv->hash = new; priv->hmask = hmask; } /* * We allow any hook to be connected to the node. * There is no per-hook private information though. */ static int ngs_newhook(node_p node, hook_p hook, const char *name) { struct ngsock *const priv = NG_NODE_PRIVATE(node); struct hookpriv *hp; uint32_t h; hp = malloc(sizeof(*hp), M_NETGRAPH_SOCK, M_NOWAIT); if (hp == NULL) return (ENOMEM); if (node->nd_numhooks * 2 > priv->hmask) ngs_rehash(node); hp->hook = hook; h = hash32_str(name, HASHINIT) & priv->hmask; LIST_INSERT_HEAD(&priv->hash[h], hp, next); NG_HOOK_SET_PRIVATE(hook, hp); return (0); } /* * If only one hook, allow read(2) and write(2) to work. */ static int ngs_connect(hook_p hook) { node_p node = NG_HOOK_NODE(hook); struct ngsock *priv = NG_NODE_PRIVATE(node); if ((priv->datasock) && (priv->datasock->ng_socket)) { if (NG_NODE_NUMHOOKS(node) == 1) priv->datasock->ng_socket->so_state |= SS_ISCONNECTED; else priv->datasock->ng_socket->so_state &= ~SS_ISCONNECTED; } return (0); } /* Look up hook by name */ static hook_p ngs_findhook(node_p node, const char *name) { struct ngsock *priv = NG_NODE_PRIVATE(node); struct hookpriv *hp; uint32_t h; /* * Microoptimisation for an ng_socket with * a single hook, which is a common case. */ if (node->nd_numhooks == 1) { hook_p hook; hook = LIST_FIRST(&node->nd_hooks); if (strcmp(NG_HOOK_NAME(hook), name) == 0) return (hook); else return (NULL); } h = hash32_str(name, HASHINIT) & priv->hmask; LIST_FOREACH(hp, &priv->hash[h], next) if (strcmp(NG_HOOK_NAME(hp->hook), name) == 0) return (hp->hook); return (NULL); } /* * Incoming messages get passed up to the control socket. 
* Unless they are for us specifically (socket_type) */ static int ngs_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct ngsock *const priv = NG_NODE_PRIVATE(node); struct ngpcb *pcbp; struct socket *so; struct sockaddr_ng addr; struct ng_mesg *msg; struct mbuf *m; ng_ID_t retaddr = NGI_RETADDR(item); int addrlen; int error = 0; NGI_GET_MSG(item, msg); NG_FREE_ITEM(item); /* * Grab priv->mtx here to prevent destroying of control socket * after checking that priv->ctlsock is not NULL. */ mtx_lock(&priv->mtx); pcbp = priv->ctlsock; /* * Only allow mesgs to be passed if we have the control socket. * Data sockets can only support the generic messages. */ if (pcbp == NULL) { mtx_unlock(&priv->mtx); TRAP_ERROR; NG_FREE_MSG(msg); return (EINVAL); } so = pcbp->ng_socket; SOCKBUF_LOCK(&so->so_rcv); /* As long as the race is handled, priv->mtx may be unlocked now. */ mtx_unlock(&priv->mtx); #ifdef TRACE_MESSAGES printf("[%x]:---------->[socket]: c=<%d>cmd=%x(%s) f=%x #%d\n", retaddr, msg->header.typecookie, msg->header.cmd, msg->header.cmdstr, msg->header.flags, msg->header.token); #endif if (msg->header.typecookie == NGM_SOCKET_COOKIE) { switch (msg->header.cmd) { case NGM_SOCK_CMD_NOLINGER: priv->flags |= NGS_FLAG_NOLINGER; break; case NGM_SOCK_CMD_LINGER: priv->flags &= ~NGS_FLAG_NOLINGER; break; default: error = EINVAL; /* unknown command */ } SOCKBUF_UNLOCK(&so->so_rcv); /* Free the message and return. */ NG_FREE_MSG(msg); return (error); } /* Get the return address into a sockaddr. */ bzero(&addr, sizeof(addr)); addr.sg_len = sizeof(addr); addr.sg_family = AF_NETGRAPH; addrlen = snprintf((char *)&addr.sg_data, sizeof(addr.sg_data), "[%x]:", retaddr); if (addrlen < 0 || addrlen > sizeof(addr.sg_data)) { SOCKBUF_UNLOCK(&so->so_rcv); printf("%s: snprintf([%x]) failed - %d\n", __func__, retaddr, addrlen); NG_FREE_MSG(msg); return (EINVAL); } /* Copy the message itself into an mbuf chain. */ m = m_devget((caddr_t)msg, sizeof(struct ng_mesg) + msg->header.arglen, 0, NULL, NULL); /* * Here we free the message. We need to do that * regardless of whether we got mbufs. */ NG_FREE_MSG(msg); if (m == NULL) { SOCKBUF_UNLOCK(&so->so_rcv); TRAP_ERROR; return (ENOBUFS); } /* Send it up to the socket. */ if (sbappendaddr_locked(&so->so_rcv, (struct sockaddr *)&addr, m, NULL) == 0) { soroverflow_locked(so); TRAP_ERROR; m_freem(m); return (ENOBUFS); } /* sorwakeup_locked () releases the lock internally. */ sorwakeup_locked(so); return (error); } /* * Receive data on a hook */ static int ngs_rcvdata(hook_p hook, item_p item) { struct ngsock *const priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); struct ngpcb *const pcbp = priv->datasock; struct socket *so; struct sockaddr_ng *addr; char *addrbuf[NG_HOOKSIZ + 4]; int addrlen; struct mbuf *m; NGI_GET_M(item, m); NG_FREE_ITEM(item); /* If there is no data socket, black-hole it. */ if (pcbp == NULL) { NG_FREE_M(m); return (0); } so = pcbp->ng_socket; /* Get the return address into a sockaddr. */ addrlen = strlen(NG_HOOK_NAME(hook)); /* <= NG_HOOKSIZ - 1 */ addr = (struct sockaddr_ng *) addrbuf; addr->sg_len = addrlen + 3; addr->sg_family = AF_NETGRAPH; bcopy(NG_HOOK_NAME(hook), addr->sg_data, addrlen); addr->sg_data[addrlen] = '\0'; /* Try to tell the socket which hook it came in on. */ SOCKBUF_LOCK(&so->so_rcv); if (sbappendaddr_locked(&so->so_rcv, (struct sockaddr *)addr, m, NULL) == 0) { SOCKBUF_UNLOCK(&so->so_rcv); m_freem(m); TRAP_ERROR; return (ENOBUFS); } /* sorwakeup_locked () releases the lock internally. 
*/ sorwakeup_locked(so); return (0); } /* * Hook disconnection * * For this type, removal of the last link destroys the node * if the NOLINGER flag is set. */ static int ngs_disconnect(hook_p hook) { node_p node = NG_HOOK_NODE(hook); struct ngsock *const priv = NG_NODE_PRIVATE(node); struct hookpriv *hp = NG_HOOK_PRIVATE(hook); LIST_REMOVE(hp, next); free(hp, M_NETGRAPH_SOCK); if ((priv->datasock) && (priv->datasock->ng_socket)) { if (NG_NODE_NUMHOOKS(node) == 1) priv->datasock->ng_socket->so_state |= SS_ISCONNECTED; else priv->datasock->ng_socket->so_state &= ~SS_ISCONNECTED; } if ((priv->flags & NGS_FLAG_NOLINGER) && (NG_NODE_NUMHOOKS(node) == 0) && (NG_NODE_IS_VALID(node))) ng_rmnode_self(node); return (0); } /* * Do local shutdown processing. * In this case, that involves making sure the socket * knows we should be shutting down. */ static int ngs_shutdown(node_p node) { struct ngsock *const priv = NG_NODE_PRIVATE(node); struct ngpcb *dpcbp, *pcbp; mtx_lock(&priv->mtx); dpcbp = priv->datasock; pcbp = priv->ctlsock; if (dpcbp != NULL) soisdisconnected(dpcbp->ng_socket); if (pcbp != NULL) soisdisconnected(pcbp->ng_socket); priv->node = NULL; NG_NODE_SET_PRIVATE(node, NULL); ng_socket_free_priv(priv); NG_NODE_UNREF(node); return (0); } static void ng_socket_item_applied(void *context, int error) { struct ngsock *const priv = (struct ngsock *)context; mtx_lock(&priv->mtx); priv->error = error; wakeup(priv); mtx_unlock(&priv->mtx); } static int dummy_disconnect(struct socket *so) { return (0); } /* * Definitions of protocols supported in the NETGRAPH domain. * Control and data socket type descriptors * * XXXRW: Perhaps _close should do something? */ static struct protosw ngcontrol_protosw = { .pr_type = SOCK_DGRAM, .pr_protocol = NG_CONTROL, .pr_flags = PR_ATOMIC | PR_ADDR /* | PR_RIGHTS */, .pr_attach = ngc_attach, .pr_bind = ngc_bind, .pr_connect = ngc_connect, .pr_detach = ngc_detach, .pr_disconnect = dummy_disconnect, .pr_send = ngc_send, .pr_sockaddr = ng_getsockaddr, }; static struct protosw ngdata_protosw = { .pr_type = SOCK_DGRAM, .pr_protocol = NG_DATA, .pr_flags = PR_ATOMIC | PR_ADDR, .pr_attach = ngd_attach, .pr_connect = ngd_connect, .pr_detach = ngd_detach, .pr_disconnect = dummy_disconnect, .pr_send = ngd_send, .pr_sockaddr = ng_getsockaddr, }; static struct domain ngdomain = { .dom_family = AF_NETGRAPH, .dom_name = "netgraph", .dom_nprotosw = 2, .dom_protosw = { &ngcontrol_protosw, &ngdata_protosw }, }; /* * Handle loading and unloading for this node type. * This is to handle auxiliary linkages (e.g protocol domain addition). */ static int ngs_mod_event(module_t mod, int event, void *data) { int error = 0; switch (event) { case MOD_LOAD: mtx_init(&ngsocketlist_mtx, "ng_socketlist", NULL, MTX_DEF); break; case MOD_UNLOAD: /* Ensure there are no open netgraph sockets. */ if (!LIST_EMPTY(&ngsocklist)) { error = EBUSY; break; } #ifdef NOTYET /* Unregister protocol domain XXX can't do this yet.. 
*/ #endif error = EBUSY; break; default: error = EOPNOTSUPP; break; } return (error); } DOMAIN_SET(ng); SYSCTL_INT(_net_graph, OID_AUTO, family, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, AF_NETGRAPH, ""); static SYSCTL_NODE(_net_graph, OID_AUTO, data, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "DATA"); SYSCTL_INT(_net_graph_data, OID_AUTO, proto, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, NG_DATA, ""); static SYSCTL_NODE(_net_graph, OID_AUTO, control, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "CONTROL"); SYSCTL_INT(_net_graph_control, OID_AUTO, proto, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, NG_CONTROL, ""); diff --git a/sys/netinet/in_pcb.c b/sys/netinet/in_pcb.c index 49e0b49e7e7e..797c0dc445dd 100644 --- a/sys/netinet/in_pcb.c +++ b/sys/netinet/in_pcb.c @@ -1,3557 +1,3540 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1991, 1993, 1995 * The Regents of the University of California. * Copyright (c) 2007-2009 Robert N. M. Watson * Copyright (c) 2010-2011 Juniper Networks, Inc. * Copyright (c) 2021-2022 Gleb Smirnoff * All rights reserved. * * Portions of this software were developed by Robert N. M. Watson under * contract to Juniper Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_ddb.h" #include "opt_ipsec.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ratelimit.h" #include "opt_route.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include #include #include #include #include #include #include #include #include #include #if defined(INET) || defined(INET6) #include #include #include #include #ifdef INET #include #include #endif #include #ifdef INET6 #include #include #include #include #endif /* INET6 */ #include #endif #include #include #define INPCBLBGROUP_SIZMIN 8 #define INPCBLBGROUP_SIZMAX 256 #define INP_FREED 0x00000200 /* Went through in_pcbfree(). */ #define INP_INLBGROUP 0x01000000 /* Inserted into inpcblbgroup. 
*/ /* * These configure the range of local port addresses assigned to * "unspecified" outgoing connections/packets/whatever. */ VNET_DEFINE(int, ipport_lowfirstauto) = IPPORT_RESERVED - 1; /* 1023 */ VNET_DEFINE(int, ipport_lowlastauto) = IPPORT_RESERVEDSTART; /* 600 */ VNET_DEFINE(int, ipport_firstauto) = IPPORT_EPHEMERALFIRST; /* 10000 */ VNET_DEFINE(int, ipport_lastauto) = IPPORT_EPHEMERALLAST; /* 65535 */ VNET_DEFINE(int, ipport_hifirstauto) = IPPORT_HIFIRSTAUTO; /* 49152 */ VNET_DEFINE(int, ipport_hilastauto) = IPPORT_HILASTAUTO; /* 65535 */ /* * Reserved ports accessible only to root. There are significant * security considerations that must be accounted for when changing these, * but the security benefits can be great. Please be careful. */ VNET_DEFINE(int, ipport_reservedhigh) = IPPORT_RESERVED - 1; /* 1023 */ VNET_DEFINE(int, ipport_reservedlow); /* Enable random ephemeral port allocation by default. */ VNET_DEFINE(int, ipport_randomized) = 1; #ifdef INET static struct inpcb *in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport_arg, struct in_addr laddr, u_int lport_arg, int lookupflags, uint8_t numa_domain); #define RANGECHK(var, min, max) \ if ((var) < (min)) { (var) = (min); } \ else if ((var) > (max)) { (var) = (max); } static int sysctl_net_ipport_check(SYSCTL_HANDLER_ARGS) { int error; error = sysctl_handle_int(oidp, arg1, arg2, req); if (error == 0) { RANGECHK(V_ipport_lowfirstauto, 1, IPPORT_RESERVED - 1); RANGECHK(V_ipport_lowlastauto, 1, IPPORT_RESERVED - 1); RANGECHK(V_ipport_firstauto, IPPORT_RESERVED, IPPORT_MAX); RANGECHK(V_ipport_lastauto, IPPORT_RESERVED, IPPORT_MAX); RANGECHK(V_ipport_hifirstauto, IPPORT_RESERVED, IPPORT_MAX); RANGECHK(V_ipport_hilastauto, IPPORT_RESERVED, IPPORT_MAX); } return (error); } #undef RANGECHK static SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "IP Ports"); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &VNET_NAME(ipport_lowfirstauto), 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &VNET_NAME(ipport_lowlastauto), 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &VNET_NAME(ipport_firstauto), 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &VNET_NAME(ipport_lastauto), 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &VNET_NAME(ipport_hifirstauto), 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &VNET_NAME(ipport_hilastauto), 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_INT(_net_inet_ip_portrange, OID_AUTO, reservedhigh, CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE, &VNET_NAME(ipport_reservedhigh), 0, ""); SYSCTL_INT(_net_inet_ip_portrange, OID_AUTO, reservedlow, CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedlow), 0, ""); SYSCTL_INT(_net_inet_ip_portrange, OID_AUTO, randomized, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipport_randomized), 0, "Enable random port allocation"); #ifdef RATELIMIT counter_u64_t rate_limit_new; counter_u64_t rate_limit_chg; counter_u64_t rate_limit_active; 
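/*
 * The port range sysctls above only clamp out-of-range writes: for example,
 * setting net.inet.ip.portrange.last to 70000 is accepted by
 * sysctl_handle_int() and then cut back by RANGECHK() to IPPORT_MAX (65535),
 * and the "low" range can never be pushed past IPPORT_RESERVED - 1 (1023).
 */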
counter_u64_t rate_limit_alloc_fail; counter_u64_t rate_limit_set_ok; static SYSCTL_NODE(_net_inet_ip, OID_AUTO, rl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "IP Rate Limiting"); SYSCTL_COUNTER_U64(_net_inet_ip_rl, OID_AUTO, active, CTLFLAG_RD, &rate_limit_active, "Active rate limited connections"); SYSCTL_COUNTER_U64(_net_inet_ip_rl, OID_AUTO, alloc_fail, CTLFLAG_RD, &rate_limit_alloc_fail, "Rate limited connection failures"); SYSCTL_COUNTER_U64(_net_inet_ip_rl, OID_AUTO, set_ok, CTLFLAG_RD, &rate_limit_set_ok, "Rate limited setting succeeded"); SYSCTL_COUNTER_U64(_net_inet_ip_rl, OID_AUTO, newrl, CTLFLAG_RD, &rate_limit_new, "Total Rate limit new attempts"); SYSCTL_COUNTER_U64(_net_inet_ip_rl, OID_AUTO, chgrl, CTLFLAG_RD, &rate_limit_chg, "Total Rate limited change attempts"); #endif /* RATELIMIT */ #endif /* INET */ VNET_DEFINE(uint32_t, in_pcbhashseed); static void in_pcbhashseed_init(void) { V_in_pcbhashseed = arc4random(); } VNET_SYSINIT(in_pcbhashseed_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST, in_pcbhashseed_init, 0); static void in_pcbremhash(struct inpcb *); /* * in_pcb.c: manage the Protocol Control Blocks. * * NOTE: It is assumed that most of these functions will be called with * the pcbinfo lock held, and often, the inpcb lock held, as these utility * functions often modify hash chains or addresses in pcbs. */ static struct inpcblbgroup * in_pcblbgroup_alloc(struct inpcblbgrouphead *hdr, struct ucred *cred, u_char vflag, uint16_t port, const union in_dependaddr *addr, int size, uint8_t numa_domain) { struct inpcblbgroup *grp; size_t bytes; bytes = __offsetof(struct inpcblbgroup, il_inp[size]); grp = malloc(bytes, M_PCB, M_ZERO | M_NOWAIT); if (grp == NULL) return (NULL); grp->il_cred = crhold(cred); grp->il_vflag = vflag; grp->il_lport = port; grp->il_numa_domain = numa_domain; grp->il_dependladdr = *addr; grp->il_inpsiz = size; CK_LIST_INSERT_HEAD(hdr, grp, il_list); return (grp); } static void in_pcblbgroup_free_deferred(epoch_context_t ctx) { struct inpcblbgroup *grp; grp = __containerof(ctx, struct inpcblbgroup, il_epoch_ctx); crfree(grp->il_cred); free(grp, M_PCB); } static void in_pcblbgroup_free(struct inpcblbgroup *grp) { CK_LIST_REMOVE(grp, il_list); NET_EPOCH_CALL(in_pcblbgroup_free_deferred, &grp->il_epoch_ctx); } static struct inpcblbgroup * in_pcblbgroup_resize(struct inpcblbgrouphead *hdr, struct inpcblbgroup *old_grp, int size) { struct inpcblbgroup *grp; int i; grp = in_pcblbgroup_alloc(hdr, old_grp->il_cred, old_grp->il_vflag, old_grp->il_lport, &old_grp->il_dependladdr, size, old_grp->il_numa_domain); if (grp == NULL) return (NULL); KASSERT(old_grp->il_inpcnt < grp->il_inpsiz, ("invalid new local group size %d and old local group count %d", grp->il_inpsiz, old_grp->il_inpcnt)); for (i = 0; i < old_grp->il_inpcnt; ++i) grp->il_inp[i] = old_grp->il_inp[i]; grp->il_inpcnt = old_grp->il_inpcnt; in_pcblbgroup_free(old_grp); return (grp); } /* * PCB at index 'i' is removed from the group. Pull up the ones below il_inp[i] * and shrink group if possible. */ static void in_pcblbgroup_reorder(struct inpcblbgrouphead *hdr, struct inpcblbgroup **grpp, int i) { struct inpcblbgroup *grp, *new_grp; grp = *grpp; for (; i + 1 < grp->il_inpcnt; ++i) grp->il_inp[i] = grp->il_inp[i + 1]; grp->il_inpcnt--; if (grp->il_inpsiz > INPCBLBGROUP_SIZMIN && grp->il_inpcnt <= grp->il_inpsiz / 4) { /* Shrink this group. */ new_grp = in_pcblbgroup_resize(hdr, grp, grp->il_inpsiz / 2); if (new_grp != NULL) *grpp = new_grp; } } /* * Add PCB to load balance group for SO_REUSEPORT_LB option. 
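 * This is normally reached because a listener set the option from userland
 * before bind(2), roughly (sketch only, error handling omitted):
 *
 *	int one = 1;
 *	setsockopt(s, SOL_SOCKET, SO_REUSEPORT_LB, &one, sizeof(one));
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));
 *	listen(s, 128);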
*/ static int in_pcbinslbgrouphash(struct inpcb *inp, uint8_t numa_domain) { const static struct timeval interval = { 60, 0 }; static struct timeval lastprint; struct inpcbinfo *pcbinfo; struct inpcblbgrouphead *hdr; struct inpcblbgroup *grp; uint32_t idx; pcbinfo = inp->inp_pcbinfo; INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(pcbinfo); #ifdef INET6 /* * Don't allow IPv4 mapped INET6 wild socket. */ if ((inp->inp_vflag & INP_IPV4) && inp->inp_laddr.s_addr == INADDR_ANY && INP_CHECK_SOCKAF(inp->inp_socket, AF_INET6)) { return (0); } #endif idx = INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_lbgrouphashmask); hdr = &pcbinfo->ipi_lbgrouphashbase[idx]; CK_LIST_FOREACH(grp, hdr, il_list) { if (grp->il_cred->cr_prison == inp->inp_cred->cr_prison && grp->il_vflag == inp->inp_vflag && grp->il_lport == inp->inp_lport && grp->il_numa_domain == numa_domain && memcmp(&grp->il_dependladdr, &inp->inp_inc.inc_ie.ie_dependladdr, sizeof(grp->il_dependladdr)) == 0) { break; } } if (grp == NULL) { /* Create new load balance group. */ grp = in_pcblbgroup_alloc(hdr, inp->inp_cred, inp->inp_vflag, inp->inp_lport, &inp->inp_inc.inc_ie.ie_dependladdr, INPCBLBGROUP_SIZMIN, numa_domain); if (grp == NULL) return (ENOBUFS); } else if (grp->il_inpcnt == grp->il_inpsiz) { if (grp->il_inpsiz >= INPCBLBGROUP_SIZMAX) { if (ratecheck(&lastprint, &interval)) printf("lb group port %d, limit reached\n", ntohs(grp->il_lport)); return (0); } /* Expand this local group. */ grp = in_pcblbgroup_resize(hdr, grp, grp->il_inpsiz * 2); if (grp == NULL) return (ENOBUFS); } KASSERT(grp->il_inpcnt < grp->il_inpsiz, ("invalid local group size %d and count %d", grp->il_inpsiz, grp->il_inpcnt)); grp->il_inp[grp->il_inpcnt] = inp; grp->il_inpcnt++; inp->inp_flags |= INP_INLBGROUP; return (0); } /* * Remove PCB from load balance group. */ static void in_pcbremlbgrouphash(struct inpcb *inp) { struct inpcbinfo *pcbinfo; struct inpcblbgrouphead *hdr; struct inpcblbgroup *grp; int i; pcbinfo = inp->inp_pcbinfo; INP_WLOCK_ASSERT(inp); MPASS(inp->inp_flags & INP_INLBGROUP); INP_HASH_WLOCK_ASSERT(pcbinfo); hdr = &pcbinfo->ipi_lbgrouphashbase[ INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_lbgrouphashmask)]; CK_LIST_FOREACH(grp, hdr, il_list) { for (i = 0; i < grp->il_inpcnt; ++i) { if (grp->il_inp[i] != inp) continue; if (grp->il_inpcnt == 1) { /* We are the last, free this local group. */ in_pcblbgroup_free(grp); } else { /* Pull up inpcbs, shrink group if possible. */ in_pcblbgroup_reorder(hdr, &grp, i); } inp->inp_flags &= ~INP_INLBGROUP; return; } } KASSERT(0, ("%s: did not find %p", __func__, inp)); } int in_pcblbgroup_numa(struct inpcb *inp, int arg) { struct inpcbinfo *pcbinfo; struct inpcblbgrouphead *hdr; struct inpcblbgroup *grp; int err, i; uint8_t numa_domain; switch (arg) { case TCP_REUSPORT_LB_NUMA_NODOM: numa_domain = M_NODOM; break; case TCP_REUSPORT_LB_NUMA_CURDOM: numa_domain = PCPU_GET(domain); break; default: if (arg < 0 || arg >= vm_ndomains) return (EINVAL); numa_domain = arg; } err = 0; pcbinfo = inp->inp_pcbinfo; INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK(pcbinfo); hdr = &pcbinfo->ipi_lbgrouphashbase[ INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_lbgrouphashmask)]; CK_LIST_FOREACH(grp, hdr, il_list) { for (i = 0; i < grp->il_inpcnt; ++i) { if (grp->il_inp[i] != inp) continue; if (grp->il_numa_domain == numa_domain) { goto abort_with_hash_wlock; } /* Remove it from the old group. */ in_pcbremlbgrouphash(inp); /* Add it to the new group based on numa domain. 
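 * (In practice in_pcblbgroup_numa() is reached when an application adjusts
 * the TCP_REUSPORT_LB_NUMA socket option on an already load-balanced
 * listener, e.g. passing TCP_REUSPORT_LB_NUMA_CURDOM to move it to the
 * domain of the CPU handling the call.)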
*/ in_pcbinslbgrouphash(inp, numa_domain); goto abort_with_hash_wlock; } } err = ENOENT; abort_with_hash_wlock: INP_HASH_WUNLOCK(pcbinfo); return (err); } /* Make sure it is safe to use hashinit(9) on CK_LIST. */ CTASSERT(sizeof(struct inpcbhead) == sizeof(LIST_HEAD(, inpcb))); /* * Initialize an inpcbinfo - a per-VNET instance of connections db. */ void in_pcbinfo_init(struct inpcbinfo *pcbinfo, struct inpcbstorage *pcbstor, u_int hash_nelements, u_int porthash_nelements) { mtx_init(&pcbinfo->ipi_lock, pcbstor->ips_infolock_name, NULL, MTX_DEF); mtx_init(&pcbinfo->ipi_hash_lock, pcbstor->ips_hashlock_name, NULL, MTX_DEF); #ifdef VIMAGE pcbinfo->ipi_vnet = curvnet; #endif CK_LIST_INIT(&pcbinfo->ipi_listhead); pcbinfo->ipi_count = 0; pcbinfo->ipi_hash_exact = hashinit(hash_nelements, M_PCB, &pcbinfo->ipi_hashmask); pcbinfo->ipi_hash_wild = hashinit(hash_nelements, M_PCB, &pcbinfo->ipi_hashmask); porthash_nelements = imin(porthash_nelements, IPPORT_MAX + 1); pcbinfo->ipi_porthashbase = hashinit(porthash_nelements, M_PCB, &pcbinfo->ipi_porthashmask); pcbinfo->ipi_lbgrouphashbase = hashinit(porthash_nelements, M_PCB, &pcbinfo->ipi_lbgrouphashmask); pcbinfo->ipi_zone = pcbstor->ips_zone; pcbinfo->ipi_portzone = pcbstor->ips_portzone; pcbinfo->ipi_smr = uma_zone_get_smr(pcbinfo->ipi_zone); } /* * Destroy an inpcbinfo. */ void in_pcbinfo_destroy(struct inpcbinfo *pcbinfo) { KASSERT(pcbinfo->ipi_count == 0, ("%s: ipi_count = %u", __func__, pcbinfo->ipi_count)); hashdestroy(pcbinfo->ipi_hash_exact, M_PCB, pcbinfo->ipi_hashmask); hashdestroy(pcbinfo->ipi_hash_wild, M_PCB, pcbinfo->ipi_hashmask); hashdestroy(pcbinfo->ipi_porthashbase, M_PCB, pcbinfo->ipi_porthashmask); hashdestroy(pcbinfo->ipi_lbgrouphashbase, M_PCB, pcbinfo->ipi_lbgrouphashmask); mtx_destroy(&pcbinfo->ipi_hash_lock); mtx_destroy(&pcbinfo->ipi_lock); } /* * Initialize a pcbstorage - per protocol zones to allocate inpcbs. */ static void inpcb_fini(void *, int); void in_pcbstorage_init(void *arg) { struct inpcbstorage *pcbstor = arg; pcbstor->ips_zone = uma_zcreate(pcbstor->ips_zone_name, pcbstor->ips_size, NULL, NULL, pcbstor->ips_pcbinit, inpcb_fini, UMA_ALIGN_CACHE, UMA_ZONE_SMR); pcbstor->ips_portzone = uma_zcreate(pcbstor->ips_portzone_name, sizeof(struct inpcbport), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); uma_zone_set_smr(pcbstor->ips_portzone, uma_zone_get_smr(pcbstor->ips_zone)); } /* * Destroy a pcbstorage - used by unloadable protocols. */ void in_pcbstorage_destroy(void *arg) { struct inpcbstorage *pcbstor = arg; uma_zdestroy(pcbstor->ips_zone); uma_zdestroy(pcbstor->ips_portzone); } /* * Allocate a PCB and associate it with the socket. * On success return with the PCB locked. 
*/ int in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo) { struct inpcb *inp; #if defined(IPSEC) || defined(IPSEC_SUPPORT) || defined(MAC) int error; #endif inp = uma_zalloc_smr(pcbinfo->ipi_zone, M_NOWAIT); if (inp == NULL) return (ENOBUFS); bzero(&inp->inp_start_zero, inp_zero_size); #ifdef NUMA inp->inp_numa_domain = M_NODOM; #endif inp->inp_pcbinfo = pcbinfo; inp->inp_socket = so; inp->inp_cred = crhold(so->so_cred); inp->inp_inc.inc_fibnum = so->so_fibnum; #ifdef MAC error = mac_inpcb_init(inp, M_NOWAIT); if (error != 0) goto out; mac_inpcb_create(so, inp); #endif #if defined(IPSEC) || defined(IPSEC_SUPPORT) error = ipsec_init_pcbpolicy(inp); if (error != 0) { #ifdef MAC mac_inpcb_destroy(inp); #endif goto out; } #endif /*IPSEC*/ #ifdef INET6 if (INP_SOCKAF(so) == AF_INET6) { inp->inp_vflag |= INP_IPV6PROTO | INP_IPV6; if (V_ip6_v6only) inp->inp_flags |= IN6P_IPV6_V6ONLY; #ifdef INET else inp->inp_vflag |= INP_IPV4; #endif if (V_ip6_auto_flowlabel) inp->inp_flags |= IN6P_AUTOFLOWLABEL; inp->in6p_hops = -1; /* use kernel default */ } #endif #if defined(INET) && defined(INET6) else #endif #ifdef INET inp->inp_vflag |= INP_IPV4; #endif inp->inp_smr = SMR_SEQ_INVALID; /* * Routes in inpcb's can cache L2 as well; they are guaranteed * to be cleaned up. */ inp->inp_route.ro_flags = RT_LLE_CACHE; refcount_init(&inp->inp_refcount, 1); /* Reference from socket. */ INP_WLOCK(inp); INP_INFO_WLOCK(pcbinfo); pcbinfo->ipi_count++; inp->inp_gencnt = ++pcbinfo->ipi_gencnt; CK_LIST_INSERT_HEAD(&pcbinfo->ipi_listhead, inp, inp_list); INP_INFO_WUNLOCK(pcbinfo); so->so_pcb = inp; return (0); #if defined(IPSEC) || defined(IPSEC_SUPPORT) || defined(MAC) out: uma_zfree_smr(pcbinfo->ipi_zone, inp); return (error); #endif } #ifdef INET int in_pcbbind(struct inpcb *inp, struct sockaddr_in *sin, struct ucred *cred) { int anonport, error; KASSERT(sin == NULL || sin->sin_family == AF_INET, ("%s: invalid address family for %p", __func__, sin)); KASSERT(sin == NULL || sin->sin_len == sizeof(struct sockaddr_in), ("%s: invalid address length for %p", __func__, sin)); INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) return (EINVAL); anonport = sin == NULL || sin->sin_port == 0; error = in_pcbbind_setup(inp, sin, &inp->inp_laddr.s_addr, &inp->inp_lport, cred); if (error) return (error); if (in_pcbinshash(inp) != 0) { inp->inp_laddr.s_addr = INADDR_ANY; inp->inp_lport = 0; return (EAGAIN); } if (anonport) inp->inp_flags |= INP_ANONPORT; return (0); } #endif #if defined(INET) || defined(INET6) /* * Assign a local port like in_pcb_lport(), but also used with connect() * and a foreign address and port. If fsa is non-NULL, choose a local port * that is unused with those, otherwise one that is completely unused. * lsa can be NULL for IPv6. */ int in_pcb_lport_dest(struct inpcb *inp, struct sockaddr *lsa, u_short *lportp, struct sockaddr *fsa, u_short fport, struct ucred *cred, int lookupflags) { struct inpcbinfo *pcbinfo; struct inpcb *tmpinp; unsigned short *lastport; int count, error; u_short aux, first, last, lport; #ifdef INET struct in_addr laddr, faddr; #endif #ifdef INET6 struct in6_addr *laddr6, *faddr6; #endif pcbinfo = inp->inp_pcbinfo; /* * Because no actual state changes occur here, a global write lock on * the pcbinfo isn't required. 
*/ INP_LOCK_ASSERT(inp); INP_HASH_LOCK_ASSERT(pcbinfo); if (inp->inp_flags & INP_HIGHPORT) { first = V_ipport_hifirstauto; /* sysctl */ last = V_ipport_hilastauto; lastport = &pcbinfo->ipi_lasthi; } else if (inp->inp_flags & INP_LOWPORT) { error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT); if (error) return (error); first = V_ipport_lowfirstauto; /* 1023 */ last = V_ipport_lowlastauto; /* 600 */ lastport = &pcbinfo->ipi_lastlow; } else { first = V_ipport_firstauto; /* sysctl */ last = V_ipport_lastauto; lastport = &pcbinfo->ipi_lastport; } /* * Instead of having two loops further down counting up or down * make sure that first is always <= last and go with only one * code path implementing all logic. */ if (first > last) { aux = first; first = last; last = aux; } #ifdef INET laddr.s_addr = INADDR_ANY; /* used by INET6+INET below too */ if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4) { if (lsa != NULL) laddr = ((struct sockaddr_in *)lsa)->sin_addr; if (fsa != NULL) faddr = ((struct sockaddr_in *)fsa)->sin_addr; } #endif #ifdef INET6 laddr6 = NULL; if ((inp->inp_vflag & INP_IPV6) != 0) { if (lsa != NULL) laddr6 = &((struct sockaddr_in6 *)lsa)->sin6_addr; if (fsa != NULL) faddr6 = &((struct sockaddr_in6 *)fsa)->sin6_addr; } #endif tmpinp = NULL; lport = *lportp; if (V_ipport_randomized) *lastport = first + (arc4random() % (last - first)); count = last - first; do { if (count-- < 0) /* completely used? */ return (EADDRNOTAVAIL); ++*lastport; if (*lastport < first || *lastport > last) *lastport = first; lport = htons(*lastport); if (fsa != NULL) { #ifdef INET if (lsa->sa_family == AF_INET) { tmpinp = in_pcblookup_hash_locked(pcbinfo, faddr, fport, laddr, lport, lookupflags, M_NODOM); } #endif #ifdef INET6 if (lsa->sa_family == AF_INET6) { tmpinp = in6_pcblookup_hash_locked(pcbinfo, faddr6, fport, laddr6, lport, lookupflags, M_NODOM); } #endif } else { #ifdef INET6 if ((inp->inp_vflag & INP_IPV6) != 0) { tmpinp = in6_pcblookup_local(pcbinfo, &inp->in6p_laddr, lport, lookupflags, cred); #ifdef INET if (tmpinp == NULL && (inp->inp_vflag & INP_IPV4)) tmpinp = in_pcblookup_local(pcbinfo, laddr, lport, lookupflags, cred); #endif } #endif #if defined(INET) && defined(INET6) else #endif #ifdef INET tmpinp = in_pcblookup_local(pcbinfo, laddr, lport, lookupflags, cred); #endif } } while (tmpinp != NULL); *lportp = lport; return (0); } /* * Select a local port (number) to use. */ int in_pcb_lport(struct inpcb *inp, struct in_addr *laddrp, u_short *lportp, struct ucred *cred, int lookupflags) { struct sockaddr_in laddr; if (laddrp) { bzero(&laddr, sizeof(laddr)); laddr.sin_family = AF_INET; laddr.sin_addr = *laddrp; } return (in_pcb_lport_dest(inp, laddrp ? (struct sockaddr *) &laddr : NULL, lportp, NULL, 0, cred, lookupflags)); } #endif /* INET || INET6 */ #ifdef INET /* * Set up a bind operation on a PCB, performing port allocation * as required, but do not actually modify the PCB. Callers can * either complete the bind by setting inp_laddr/inp_lport and * calling in_pcbinshash(), or they can just use the resulting * port and address to authorise the sending of a once-off packet. * * On error, the values of *laddrp and *lportp are not changed. 
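 * A typical caller looks like in_pcbbind() above: it passes pointers to
 * inp_laddr/inp_lport so the chosen address and port land directly in the
 * PCB, then calls in_pcbinshash() and clears both fields again if the hash
 * insert fails.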
*/ int in_pcbbind_setup(struct inpcb *inp, struct sockaddr_in *sin, in_addr_t *laddrp, u_short *lportp, struct ucred *cred) { struct socket *so = inp->inp_socket; struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; struct in_addr laddr; u_short lport = 0; int lookupflags = 0, reuseport = (so->so_options & SO_REUSEPORT); int error; /* * XXX: Maybe we could let SO_REUSEPORT_LB set SO_REUSEPORT bit here * so that we don't have to add to the (already messy) code below. */ int reuseport_lb = (so->so_options & SO_REUSEPORT_LB); /* * No state changes, so read locks are sufficient here. */ INP_LOCK_ASSERT(inp); INP_HASH_LOCK_ASSERT(pcbinfo); laddr.s_addr = *laddrp; if (sin != NULL && laddr.s_addr != INADDR_ANY) return (EINVAL); if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT|SO_REUSEPORT_LB)) == 0) lookupflags = INPLOOKUP_WILDCARD; if (sin == NULL) { if ((error = prison_local_ip4(cred, &laddr)) != 0) return (error); } else { KASSERT(sin->sin_family == AF_INET, ("%s: invalid family for address %p", __func__, sin)); KASSERT(sin->sin_len == sizeof(*sin), ("%s: invalid length for address %p", __func__, sin)); error = prison_local_ip4(cred, &sin->sin_addr); if (error) return (error); if (sin->sin_port != *lportp) { /* Don't allow the port to change. */ if (*lportp != 0) return (EINVAL); lport = sin->sin_port; } /* NB: lport is left as 0 if the port isn't being changed. */ if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { /* * Treat SO_REUSEADDR as SO_REUSEPORT for multicast; * allow complete duplication of binding if * SO_REUSEPORT is set, or if SO_REUSEADDR is set * and a multicast address is bound on both * new and duplicated sockets. */ if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) != 0) reuseport = SO_REUSEADDR|SO_REUSEPORT; /* * XXX: How to deal with SO_REUSEPORT_LB here? * Treat same as SO_REUSEPORT for now. */ if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT_LB)) != 0) reuseport_lb = SO_REUSEADDR|SO_REUSEPORT_LB; } else if (sin->sin_addr.s_addr != INADDR_ANY) { sin->sin_port = 0; /* yech... */ bzero(&sin->sin_zero, sizeof(sin->sin_zero)); /* * Is the address a local IP address? * If INP_BINDANY is set, then the socket may be bound * to any endpoint address, local or not. */ if ((inp->inp_flags & INP_BINDANY) == 0 && ifa_ifwithaddr_check((struct sockaddr *)sin) == 0) return (EADDRNOTAVAIL); } laddr = sin->sin_addr; if (lport) { struct inpcb *t; /* GROSS */ if (ntohs(lport) <= V_ipport_reservedhigh && ntohs(lport) >= V_ipport_reservedlow && priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT)) return (EACCES); if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) && priv_check_cred(inp->inp_cred, PRIV_NETINET_REUSEPORT) != 0) { t = in_pcblookup_local(pcbinfo, sin->sin_addr, lport, INPLOOKUP_WILDCARD, cred); /* * XXX * This entire block sorely needs a rewrite. 
*/ if (t != NULL && (so->so_type != SOCK_STREAM || ntohl(t->inp_faddr.s_addr) == INADDR_ANY) && (ntohl(sin->sin_addr.s_addr) != INADDR_ANY || ntohl(t->inp_laddr.s_addr) != INADDR_ANY || (t->inp_socket->so_options & SO_REUSEPORT) || (t->inp_socket->so_options & SO_REUSEPORT_LB) == 0) && (inp->inp_cred->cr_uid != t->inp_cred->cr_uid)) return (EADDRINUSE); } t = in_pcblookup_local(pcbinfo, sin->sin_addr, lport, lookupflags, cred); if (t != NULL && (reuseport & t->inp_socket->so_options) == 0 && (reuseport_lb & t->inp_socket->so_options) == 0) { #ifdef INET6 if (ntohl(sin->sin_addr.s_addr) != INADDR_ANY || ntohl(t->inp_laddr.s_addr) != INADDR_ANY || (inp->inp_vflag & INP_IPV6PROTO) == 0 || (t->inp_vflag & INP_IPV6PROTO) == 0) #endif return (EADDRINUSE); } } } if (*lportp != 0) lport = *lportp; if (lport == 0) { error = in_pcb_lport(inp, &laddr, &lport, cred, lookupflags); if (error != 0) return (error); } *laddrp = laddr.s_addr; *lportp = lport; return (0); } /* * Connect from a socket to a specified address. * Both address and port must be specified in argument sin. * If don't have a local address for this socket yet, * then pick one. */ int in_pcbconnect(struct inpcb *inp, struct sockaddr_in *sin, struct ucred *cred, bool rehash __unused) { u_short lport, fport; in_addr_t laddr, faddr; int anonport, error; INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); KASSERT(in_nullhost(inp->inp_faddr), ("%s: inp is already connected", __func__)); lport = inp->inp_lport; laddr = inp->inp_laddr.s_addr; anonport = (lport == 0); error = in_pcbconnect_setup(inp, sin, &laddr, &lport, &faddr, &fport, cred); if (error) return (error); inp->inp_faddr.s_addr = faddr; inp->inp_fport = fport; /* Do the initial binding of the local address if required. */ if (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0) { inp->inp_lport = lport; inp->inp_laddr.s_addr = laddr; if (in_pcbinshash(inp) != 0) { inp->inp_laddr.s_addr = inp->inp_faddr.s_addr = INADDR_ANY; inp->inp_lport = inp->inp_fport = 0; return (EAGAIN); } } else { inp->inp_lport = lport; inp->inp_laddr.s_addr = laddr; if ((inp->inp_flags & INP_INHASHLIST) != 0) in_pcbrehash(inp); else in_pcbinshash(inp); } if (anonport) inp->inp_flags |= INP_ANONPORT; return (0); } /* * Do proper source address selection on an unbound socket in case * of connect. Take jails into account as well. */ int in_pcbladdr(struct inpcb *inp, struct in_addr *faddr, struct in_addr *laddr, struct ucred *cred) { struct ifaddr *ifa; struct sockaddr *sa; struct sockaddr_in *sin, dst; struct nhop_object *nh; int error; NET_EPOCH_ASSERT(); KASSERT(laddr != NULL, ("%s: laddr NULL", __func__)); /* * Bypass source address selection and use the primary jail IP * if requested. */ if (!prison_saddrsel_ip4(cred, laddr)) return (0); error = 0; nh = NULL; bzero(&dst, sizeof(dst)); sin = &dst; sin->sin_family = AF_INET; sin->sin_len = sizeof(struct sockaddr_in); sin->sin_addr.s_addr = faddr->s_addr; /* * If route is known our src addr is taken from the i/f, * else punt. * * Find out route to destination. */ if ((inp->inp_socket->so_options & SO_DONTROUTE) == 0) nh = fib4_lookup(inp->inp_inc.inc_fibnum, *faddr, 0, NHR_NONE, 0); /* * If we found a route, use the address corresponding to * the outgoing interface. * * Otherwise assume faddr is reachable on a directly connected * network and try to find a corresponding interface to take * the source address from. 
*/ if (nh == NULL || nh->nh_ifp == NULL) { struct in_ifaddr *ia; struct ifnet *ifp; ia = ifatoia(ifa_ifwithdstaddr((struct sockaddr *)sin, inp->inp_socket->so_fibnum)); if (ia == NULL) { ia = ifatoia(ifa_ifwithnet((struct sockaddr *)sin, 0, inp->inp_socket->so_fibnum)); } if (ia == NULL) { error = ENETUNREACH; goto done; } if (!prison_flag(cred, PR_IP4)) { laddr->s_addr = ia->ia_addr.sin_addr.s_addr; goto done; } ifp = ia->ia_ifp; ia = NULL; CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { sa = ifa->ifa_addr; if (sa->sa_family != AF_INET) continue; sin = (struct sockaddr_in *)sa; if (prison_check_ip4(cred, &sin->sin_addr) == 0) { ia = (struct in_ifaddr *)ifa; break; } } if (ia != NULL) { laddr->s_addr = ia->ia_addr.sin_addr.s_addr; goto done; } /* 3. As a last resort return the 'default' jail address. */ error = prison_get_ip4(cred, laddr); goto done; } /* * If the outgoing interface on the route found is not * a loopback interface, use the address from that interface. * In case of jails do those three steps: * 1. check if the interface address belongs to the jail. If so use it. * 2. check if we have any address on the outgoing interface * belonging to this jail. If so use it. * 3. as a last resort return the 'default' jail address. */ if ((nh->nh_ifp->if_flags & IFF_LOOPBACK) == 0) { struct in_ifaddr *ia; struct ifnet *ifp; /* If not jailed, use the default returned. */ if (!prison_flag(cred, PR_IP4)) { ia = (struct in_ifaddr *)nh->nh_ifa; laddr->s_addr = ia->ia_addr.sin_addr.s_addr; goto done; } /* Jailed. */ /* 1. Check if the iface address belongs to the jail. */ sin = (struct sockaddr_in *)nh->nh_ifa->ifa_addr; if (prison_check_ip4(cred, &sin->sin_addr) == 0) { ia = (struct in_ifaddr *)nh->nh_ifa; laddr->s_addr = ia->ia_addr.sin_addr.s_addr; goto done; } /* * 2. Check if we have any address on the outgoing interface * belonging to this jail. */ ia = NULL; ifp = nh->nh_ifp; CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { sa = ifa->ifa_addr; if (sa->sa_family != AF_INET) continue; sin = (struct sockaddr_in *)sa; if (prison_check_ip4(cred, &sin->sin_addr) == 0) { ia = (struct in_ifaddr *)ifa; break; } } if (ia != NULL) { laddr->s_addr = ia->ia_addr.sin_addr.s_addr; goto done; } /* 3. As a last resort return the 'default' jail address. */ error = prison_get_ip4(cred, laddr); goto done; } /* * The outgoing interface is marked with 'loopback net', so a route * to ourselves is here. * Try to find the interface of the destination address and then * take the address from there. That interface is not necessarily * a loopback interface. * In case of jails, check that it is an address of the jail * and if we cannot find, fall back to the 'default' jail address. */ if ((nh->nh_ifp->if_flags & IFF_LOOPBACK) != 0) { struct in_ifaddr *ia; ia = ifatoia(ifa_ifwithdstaddr(sintosa(&dst), inp->inp_socket->so_fibnum)); if (ia == NULL) ia = ifatoia(ifa_ifwithnet(sintosa(&dst), 0, inp->inp_socket->so_fibnum)); if (ia == NULL) ia = ifatoia(ifa_ifwithaddr(sintosa(&dst))); if (!prison_flag(cred, PR_IP4)) { if (ia == NULL) { error = ENETUNREACH; goto done; } laddr->s_addr = ia->ia_addr.sin_addr.s_addr; goto done; } /* Jailed. 
*/ if (ia != NULL) { struct ifnet *ifp; ifp = ia->ia_ifp; ia = NULL; CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { sa = ifa->ifa_addr; if (sa->sa_family != AF_INET) continue; sin = (struct sockaddr_in *)sa; if (prison_check_ip4(cred, &sin->sin_addr) == 0) { ia = (struct in_ifaddr *)ifa; break; } } if (ia != NULL) { laddr->s_addr = ia->ia_addr.sin_addr.s_addr; goto done; } } /* 3. As a last resort return the 'default' jail address. */ error = prison_get_ip4(cred, laddr); goto done; } done: if (error == 0 && laddr->s_addr == INADDR_ANY) return (EHOSTUNREACH); return (error); } /* * Set up for a connect from a socket to the specified address. * On entry, *laddrp and *lportp should contain the current local * address and port for the PCB; these are updated to the values * that should be placed in inp_laddr and inp_lport to complete * the connect. * * On success, *faddrp and *fportp will be set to the remote address * and port. These are not updated in the error case. */ int in_pcbconnect_setup(struct inpcb *inp, struct sockaddr_in *sin, in_addr_t *laddrp, u_short *lportp, in_addr_t *faddrp, u_short *fportp, struct ucred *cred) { struct in_ifaddr *ia; struct in_addr laddr, faddr; u_short lport, fport; int error; KASSERT(sin->sin_family == AF_INET, ("%s: invalid address family for %p", __func__, sin)); KASSERT(sin->sin_len == sizeof(*sin), ("%s: invalid address length for %p", __func__, sin)); /* * Because a global state change doesn't actually occur here, a read * lock is sufficient. */ NET_EPOCH_ASSERT(); INP_LOCK_ASSERT(inp); INP_HASH_LOCK_ASSERT(inp->inp_pcbinfo); if (sin->sin_port == 0) return (EADDRNOTAVAIL); laddr.s_addr = *laddrp; lport = *lportp; faddr = sin->sin_addr; fport = sin->sin_port; #ifdef ROUTE_MPATH if (CALC_FLOWID_OUTBOUND) { uint32_t hash_val, hash_type; hash_val = fib4_calc_software_hash(laddr, faddr, 0, fport, inp->inp_socket->so_proto->pr_protocol, &hash_type); inp->inp_flowid = hash_val; inp->inp_flowtype = hash_type; } #endif if (!CK_STAILQ_EMPTY(&V_in_ifaddrhead)) { /* * If the destination address is INADDR_ANY, * use the primary local address. * If the supplied address is INADDR_BROADCAST, * and the primary interface supports broadcast, * choose the broadcast address for that interface. */ if (faddr.s_addr == INADDR_ANY) { faddr = IA_SIN(CK_STAILQ_FIRST(&V_in_ifaddrhead))->sin_addr; if ((error = prison_get_ip4(cred, &faddr)) != 0) return (error); } else if (faddr.s_addr == (u_long)INADDR_BROADCAST) { if (CK_STAILQ_FIRST(&V_in_ifaddrhead)->ia_ifp->if_flags & IFF_BROADCAST) faddr = satosin(&CK_STAILQ_FIRST( &V_in_ifaddrhead)->ia_broadaddr)->sin_addr; } } if (laddr.s_addr == INADDR_ANY) { error = in_pcbladdr(inp, &faddr, &laddr, cred); /* * If the destination address is multicast and an outgoing * interface has been set as a multicast option, prefer the * address of that interface as our source address. 
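 *
 * The canonical caller is in_pcbconnect() above; in outline (a sketch
 * only, with the hash (re)insertion details omitted):
 *
 *	in_addr_t laddr, faddr;
 *	u_short lport, fport;
 *
 *	laddr = inp->inp_laddr.s_addr;
 *	lport = inp->inp_lport;
 *	error = in_pcbconnect_setup(inp, sin, &laddr, &lport, &faddr,
 *	    &fport, cred);
 *	if (error == 0) {
 *		inp->inp_faddr.s_addr = faddr;
 *		inp->inp_fport = fport;
 *		inp->inp_laddr.s_addr = laddr;
 *		inp->inp_lport = lport;
 *		/* ...then in_pcbinshash() or in_pcbrehash(), as above. */
 *	}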
*/ if (IN_MULTICAST(ntohl(faddr.s_addr)) && inp->inp_moptions != NULL) { struct ip_moptions *imo; struct ifnet *ifp; imo = inp->inp_moptions; if (imo->imo_multicast_ifp != NULL) { ifp = imo->imo_multicast_ifp; CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) { if (ia->ia_ifp == ifp && prison_check_ip4(cred, &ia->ia_addr.sin_addr) == 0) break; } if (ia == NULL) error = EADDRNOTAVAIL; else { laddr = ia->ia_addr.sin_addr; error = 0; } } } if (error) return (error); } if (lport != 0) { if (in_pcblookup_hash_locked(inp->inp_pcbinfo, faddr, fport, laddr, lport, 0, M_NODOM) != NULL) return (EADDRINUSE); } else { struct sockaddr_in lsin, fsin; bzero(&lsin, sizeof(lsin)); bzero(&fsin, sizeof(fsin)); lsin.sin_family = AF_INET; lsin.sin_addr = laddr; fsin.sin_family = AF_INET; fsin.sin_addr = faddr; error = in_pcb_lport_dest(inp, (struct sockaddr *) &lsin, &lport, (struct sockaddr *)& fsin, fport, cred, INPLOOKUP_WILDCARD); if (error) return (error); } *laddrp = laddr.s_addr; *lportp = lport; *faddrp = faddr.s_addr; *fportp = fport; return (0); } void in_pcbdisconnect(struct inpcb *inp) { INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); KASSERT(inp->inp_smr == SMR_SEQ_INVALID, ("%s: inp %p was already disconnected", __func__, inp)); in_pcbremhash_locked(inp); /* See the comment in in_pcbinshash(). */ inp->inp_smr = smr_advance(inp->inp_pcbinfo->ipi_smr); inp->inp_laddr.s_addr = INADDR_ANY; inp->inp_faddr.s_addr = INADDR_ANY; inp->inp_fport = 0; } #endif /* INET */ /* * in_pcbdetach() is responsibe for disassociating a socket from an inpcb. * For most protocols, this will be invoked immediately prior to calling * in_pcbfree(). However, with TCP the inpcb may significantly outlive the * socket, in which case in_pcbfree() is deferred. */ void in_pcbdetach(struct inpcb *inp) { KASSERT(inp->inp_socket != NULL, ("%s: inp_socket == NULL", __func__)); #ifdef RATELIMIT if (inp->inp_snd_tag != NULL) in_pcbdetach_txrtlmt(inp); #endif inp->inp_socket->so_pcb = NULL; inp->inp_socket = NULL; } /* * inpcb hash lookups are protected by SMR section. * * Once desired pcb has been found, switching from SMR section to a pcb * lock is performed with inp_smr_lock(). We can not use INP_(W|R)LOCK * here because SMR is a critical section. * In 99%+ cases inp_smr_lock() would obtain the lock immediately. */ void inp_lock(struct inpcb *inp, const inp_lookup_t lock) { lock == INPLOOKUP_RLOCKPCB ? rw_rlock(&inp->inp_lock) : rw_wlock(&inp->inp_lock); } void inp_unlock(struct inpcb *inp, const inp_lookup_t lock) { lock == INPLOOKUP_RLOCKPCB ? rw_runlock(&inp->inp_lock) : rw_wunlock(&inp->inp_lock); } int inp_trylock(struct inpcb *inp, const inp_lookup_t lock) { return (lock == INPLOOKUP_RLOCKPCB ? rw_try_rlock(&inp->inp_lock) : rw_try_wlock(&inp->inp_lock)); } static inline bool _inp_smr_lock(struct inpcb *inp, const inp_lookup_t lock, const int ignflags) { MPASS(lock == INPLOOKUP_RLOCKPCB || lock == INPLOOKUP_WLOCKPCB); SMR_ASSERT_ENTERED(inp->inp_pcbinfo->ipi_smr); if (__predict_true(inp_trylock(inp, lock))) { if (__predict_false(inp->inp_flags & ignflags)) { smr_exit(inp->inp_pcbinfo->ipi_smr); inp_unlock(inp, lock); return (false); } smr_exit(inp->inp_pcbinfo->ipi_smr); return (true); } if (__predict_true(refcount_acquire_if_not_zero(&inp->inp_refcount))) { smr_exit(inp->inp_pcbinfo->ipi_smr); inp_lock(inp, lock); if (__predict_false(in_pcbrele(inp, lock))) return (false); /* * inp acquired through refcount & lock for sure didn't went * through uma_zfree(). 
However, it may have already went * through in_pcbfree() and has another reference, that * prevented its release by our in_pcbrele(). */ if (__predict_false(inp->inp_flags & ignflags)) { inp_unlock(inp, lock); return (false); } return (true); } else { smr_exit(inp->inp_pcbinfo->ipi_smr); return (false); } } bool inp_smr_lock(struct inpcb *inp, const inp_lookup_t lock) { /* * in_pcblookup() family of functions ignore not only freed entries, * that may be found due to lockless access to the hash, but dropped * entries, too. */ return (_inp_smr_lock(inp, lock, INP_FREED | INP_DROPPED)); } /* * inp_next() - inpcb hash/list traversal iterator * * Requires initialized struct inpcb_iterator for context. * The structure can be initialized with INP_ITERATOR() or INP_ALL_ITERATOR(). * * - Iterator can have either write-lock or read-lock semantics, that can not * be changed later. * - Iterator can iterate either over all pcbs list (INP_ALL_LIST), or through * a single hash slot. Note: only rip_input() does the latter. * - Iterator may have optional bool matching function. The matching function * will be executed for each inpcb in the SMR context, so it can not acquire * locks and can safely access only immutable fields of inpcb. * * A fresh initialized iterator has NULL inpcb in its context and that * means that inp_next() call would return the very first inpcb on the list * locked with desired semantic. In all following calls the context pointer * shall hold the current inpcb pointer. The KPI user is not supposed to * unlock the current inpcb! Upon end of traversal inp_next() will return NULL * and write NULL to its context. After end of traversal an iterator can be * reused. * * List traversals have the following features/constraints: * - New entries won't be seen, as they are always added to the head of a list. * - Removed entries won't stop traversal as long as they are not added to * a different list. This is violated by in_pcbrehash(). */ #define II_LIST_FIRST(ipi, hash) \ (((hash) == INP_ALL_LIST) ? \ CK_LIST_FIRST(&(ipi)->ipi_listhead) : \ CK_LIST_FIRST(&(ipi)->ipi_hash_exact[(hash)])) #define II_LIST_NEXT(inp, hash) \ (((hash) == INP_ALL_LIST) ? \ CK_LIST_NEXT((inp), inp_list) : \ CK_LIST_NEXT((inp), inp_hash_exact)) #define II_LOCK_ASSERT(inp, lock) \ rw_assert(&(inp)->inp_lock, \ (lock) == INPLOOKUP_RLOCKPCB ? RA_RLOCKED : RA_WLOCKED ) struct inpcb * inp_next(struct inpcb_iterator *ii) { const struct inpcbinfo *ipi = ii->ipi; inp_match_t *match = ii->match; void *ctx = ii->ctx; inp_lookup_t lock = ii->lock; int hash = ii->hash; struct inpcb *inp; if (ii->inp == NULL) { /* First call. */ smr_enter(ipi->ipi_smr); /* This is unrolled CK_LIST_FOREACH(). */ for (inp = II_LIST_FIRST(ipi, hash); inp != NULL; inp = II_LIST_NEXT(inp, hash)) { if (match != NULL && (match)(inp, ctx) == false) continue; if (__predict_true(_inp_smr_lock(inp, lock, INP_FREED))) break; else { smr_enter(ipi->ipi_smr); MPASS(inp != II_LIST_FIRST(ipi, hash)); inp = II_LIST_FIRST(ipi, hash); if (inp == NULL) break; } } if (inp == NULL) smr_exit(ipi->ipi_smr); else ii->inp = inp; return (inp); } /* Not a first call. 
*/ smr_enter(ipi->ipi_smr); restart: inp = ii->inp; II_LOCK_ASSERT(inp, lock); next: inp = II_LIST_NEXT(inp, hash); if (inp == NULL) { smr_exit(ipi->ipi_smr); goto found; } if (match != NULL && (match)(inp, ctx) == false) goto next; if (__predict_true(inp_trylock(inp, lock))) { if (__predict_false(inp->inp_flags & INP_FREED)) { /* * Entries are never inserted in middle of a list, thus * as long as we are in SMR, we can continue traversal. * Jump to 'restart' should yield in the same result, * but could produce unnecessary looping. Could this * looping be unbound? */ inp_unlock(inp, lock); goto next; } else { smr_exit(ipi->ipi_smr); goto found; } } /* * Can't obtain lock immediately, thus going hard. Once we exit the * SMR section we can no longer jump to 'next', and our only stable * anchoring point is ii->inp, which we keep locked for this case, so * we jump to 'restart'. */ if (__predict_true(refcount_acquire_if_not_zero(&inp->inp_refcount))) { smr_exit(ipi->ipi_smr); inp_lock(inp, lock); if (__predict_false(in_pcbrele(inp, lock))) { smr_enter(ipi->ipi_smr); goto restart; } /* * See comment in inp_smr_lock(). */ if (__predict_false(inp->inp_flags & INP_FREED)) { inp_unlock(inp, lock); smr_enter(ipi->ipi_smr); goto restart; } } else goto next; found: inp_unlock(ii->inp, lock); ii->inp = inp; return (ii->inp); } /* * in_pcbref() bumps the reference count on an inpcb in order to maintain * stability of an inpcb pointer despite the inpcb lock being released or * SMR section exited. * * To free a reference later in_pcbrele_(r|w)locked() must be performed. */ void in_pcbref(struct inpcb *inp) { u_int old __diagused; old = refcount_acquire(&inp->inp_refcount); KASSERT(old > 0, ("%s: refcount 0", __func__)); } /* * Drop a refcount on an inpcb elevated using in_pcbref(), potentially * freeing the pcb, if the reference was very last. */ bool in_pcbrele_rlocked(struct inpcb *inp) { INP_RLOCK_ASSERT(inp); if (!refcount_release(&inp->inp_refcount)) return (false); MPASS(inp->inp_flags & INP_FREED); MPASS(inp->inp_socket == NULL); crfree(inp->inp_cred); #ifdef INVARIANTS inp->inp_cred = NULL; #endif INP_RUNLOCK(inp); uma_zfree_smr(inp->inp_pcbinfo->ipi_zone, inp); return (true); } bool in_pcbrele_wlocked(struct inpcb *inp) { INP_WLOCK_ASSERT(inp); if (!refcount_release(&inp->inp_refcount)) return (false); MPASS(inp->inp_flags & INP_FREED); MPASS(inp->inp_socket == NULL); crfree(inp->inp_cred); #ifdef INVARIANTS inp->inp_cred = NULL; #endif INP_WUNLOCK(inp); uma_zfree_smr(inp->inp_pcbinfo->ipi_zone, inp); return (true); } bool in_pcbrele(struct inpcb *inp, const inp_lookup_t lock) { return (lock == INPLOOKUP_RLOCKPCB ? in_pcbrele_rlocked(inp) : in_pcbrele_wlocked(inp)); } /* * Unconditionally schedule an inpcb to be freed by decrementing its * reference count, which should occur only after the inpcb has been detached * from its socket. If another thread holds a temporary reference (acquired * using in_pcbref()) then the free is deferred until that reference is * released using in_pcbrele_(r|w)locked(), but the inpcb is still unlocked. * Almost all work, including removal from global lists, is done in this * context, where the pcbinfo lock is held. 
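 *
 * In outline, a typical teardown therefore looks like this (a sketch,
 * not a verbatim caller):
 *
 *	INP_WLOCK(inp);
 *	in_pcbdetach(inp);
 *	in_pcbfree(inp);
 *	/* inp is unlocked, and possibly already freed, at this point. */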
*/ void in_pcbfree(struct inpcb *inp) { struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; #ifdef INET struct ip_moptions *imo; #endif #ifdef INET6 struct ip6_moptions *im6o; #endif INP_WLOCK_ASSERT(inp); KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__)); KASSERT((inp->inp_flags & INP_FREED) == 0, ("%s: called twice for pcb %p", __func__, inp)); inp->inp_flags |= INP_FREED; INP_INFO_WLOCK(pcbinfo); inp->inp_gencnt = ++pcbinfo->ipi_gencnt; pcbinfo->ipi_count--; CK_LIST_REMOVE(inp, inp_list); INP_INFO_WUNLOCK(pcbinfo); if (inp->inp_flags & INP_INHASHLIST) in_pcbremhash(inp); RO_INVALIDATE_CACHE(&inp->inp_route); #ifdef MAC mac_inpcb_destroy(inp); #endif #if defined(IPSEC) || defined(IPSEC_SUPPORT) if (inp->inp_sp != NULL) ipsec_delete_pcbpolicy(inp); #endif #ifdef INET if (inp->inp_options) (void)m_free(inp->inp_options); imo = inp->inp_moptions; #endif #ifdef INET6 if (inp->inp_vflag & INP_IPV6PROTO) { ip6_freepcbopts(inp->in6p_outputopts); im6o = inp->in6p_moptions; } else im6o = NULL; #endif if (__predict_false(in_pcbrele_wlocked(inp) == false)) { INP_WUNLOCK(inp); } #ifdef INET6 ip6_freemoptions(im6o); #endif #ifdef INET inp_freemoptions(imo); #endif } /* * Different protocols initialize their inpcbs differently - giving * different name to the lock. But they all are disposed the same. */ static void inpcb_fini(void *mem, int size) { struct inpcb *inp = mem; INP_LOCK_DESTROY(inp); } /* * in_pcbdrop() removes an inpcb from hashed lists, releasing its address and * port reservation, and preventing it from being returned by inpcb lookups. * * It is used by TCP to mark an inpcb as unused and avoid future packet * delivery or event notification when a socket remains open but TCP has * closed. This might occur as a result of a shutdown()-initiated TCP close * or a RST on the wire, and allows the port binding to be reused while still * maintaining the invariant that so_pcb always points to a valid inpcb until * in_pcbdetach(). * * XXXRW: Possibly in_pcbdrop() should also prevent future notifications by * in_pcbpurgeif0()? */ void in_pcbdrop(struct inpcb *inp) { INP_WLOCK_ASSERT(inp); #ifdef INVARIANTS if (inp->inp_socket != NULL && inp->inp_ppcb != NULL) MPASS(inp->inp_refcount > 1); #endif inp->inp_flags |= INP_DROPPED; if (inp->inp_flags & INP_INHASHLIST) in_pcbremhash(inp); } #ifdef INET /* * Common routines to return the socket addresses associated with inpcbs. 
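 *
 * The caller provides the storage for the returned address, e.g.
 * (illustrative):
 *
 *	struct sockaddr_in sin;
 *
 *	error = in_getsockaddr(so, (struct sockaddr *)&sin);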
*/ -struct sockaddr * -in_sockaddr(in_port_t port, struct in_addr *addr_p) -{ - struct sockaddr_in *sin; - - sin = malloc(sizeof *sin, M_SONAME, - M_WAITOK | M_ZERO); - sin->sin_family = AF_INET; - sin->sin_len = sizeof(*sin); - sin->sin_addr = *addr_p; - sin->sin_port = port; - - return (struct sockaddr *)sin; -} - int -in_getsockaddr(struct socket *so, struct sockaddr **nam) +in_getsockaddr(struct socket *so, struct sockaddr *sa) { struct inpcb *inp; - struct in_addr addr; - in_port_t port; inp = sotoinpcb(so); KASSERT(inp != NULL, ("in_getsockaddr: inp == NULL")); - INP_RLOCK(inp); - port = inp->inp_lport; - addr = inp->inp_laddr; - INP_RUNLOCK(inp); + *(struct sockaddr_in *)sa = (struct sockaddr_in ){ + .sin_len = sizeof(struct sockaddr_in), + .sin_family = AF_INET, + .sin_port = inp->inp_lport, + .sin_addr = inp->inp_laddr, + }; - *nam = in_sockaddr(port, &addr); - return 0; + return (0); } int -in_getpeeraddr(struct socket *so, struct sockaddr **nam) +in_getpeeraddr(struct socket *so, struct sockaddr *sa) { struct inpcb *inp; - struct in_addr addr; - in_port_t port; inp = sotoinpcb(so); KASSERT(inp != NULL, ("in_getpeeraddr: inp == NULL")); - INP_RLOCK(inp); - port = inp->inp_fport; - addr = inp->inp_faddr; - INP_RUNLOCK(inp); + *(struct sockaddr_in *)sa = (struct sockaddr_in ){ + .sin_len = sizeof(struct sockaddr_in), + .sin_family = AF_INET, + .sin_port = inp->inp_fport, + .sin_addr = inp->inp_faddr, + }; - *nam = in_sockaddr(port, &addr); - return 0; + return (0); } static bool inp_v4_multi_match(const struct inpcb *inp, void *v __unused) { if ((inp->inp_vflag & INP_IPV4) && inp->inp_moptions != NULL) return (true); else return (false); } void in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp) { struct inpcb_iterator inpi = INP_ITERATOR(pcbinfo, INPLOOKUP_WLOCKPCB, inp_v4_multi_match, NULL); struct inpcb *inp; struct in_multi *inm; struct in_mfilter *imf; struct ip_moptions *imo; IN_MULTI_LOCK_ASSERT(); while ((inp = inp_next(&inpi)) != NULL) { INP_WLOCK_ASSERT(inp); imo = inp->inp_moptions; /* * Unselect the outgoing interface if it is being * detached. */ if (imo->imo_multicast_ifp == ifp) imo->imo_multicast_ifp = NULL; /* * Drop multicast group membership if we joined * through the interface being detached. * * XXX This can all be deferred to an epoch_call */ restart: IP_MFILTER_FOREACH(imf, &imo->imo_head) { if ((inm = imf->imf_inm) == NULL) continue; if (inm->inm_ifp != ifp) continue; ip_mfilter_remove(&imo->imo_head, imf); in_leavegroup_locked(inm, NULL); ip_mfilter_free(imf); goto restart; } } } /* * Lookup a PCB based on the local address and port. Caller must hold the * hash lock. No inpcb locks or references are acquired. */ #define INP_LOOKUP_MAPPED_PCB_COST 3 struct inpcb * in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, u_short lport, int lookupflags, struct ucred *cred) { struct inpcb *inp; #ifdef INET6 int matchwild = 3 + INP_LOOKUP_MAPPED_PCB_COST; #else int matchwild = 3; #endif int wildcard; KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0, ("%s: invalid lookup flags %d", __func__, lookupflags)); INP_HASH_LOCK_ASSERT(pcbinfo); if ((lookupflags & INPLOOKUP_WILDCARD) == 0) { struct inpcbhead *head; /* * Look for an unconnected (wildcard foreign addr) PCB that * matches the local address and port we're looking for. 
*/ head = &pcbinfo->ipi_hash_wild[INP_PCBHASH_WILD(lport, pcbinfo->ipi_hashmask)]; CK_LIST_FOREACH(inp, head, inp_hash_wild) { #ifdef INET6 /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV4) == 0) continue; #endif if (inp->inp_faddr.s_addr == INADDR_ANY && inp->inp_laddr.s_addr == laddr.s_addr && inp->inp_lport == lport) { /* * Found? */ if (prison_equal_ip4(cred->cr_prison, inp->inp_cred->cr_prison)) return (inp); } } /* * Not found. */ return (NULL); } else { struct inpcbporthead *porthash; struct inpcbport *phd; struct inpcb *match = NULL; /* * Best fit PCB lookup. * * First see if this local port is in use by looking on the * port hash list. */ porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport, pcbinfo->ipi_porthashmask)]; CK_LIST_FOREACH(phd, porthash, phd_hash) { if (phd->phd_port == lport) break; } if (phd != NULL) { /* * Port is in use by one or more PCBs. Look for best * fit. */ CK_LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) { wildcard = 0; if (!prison_equal_ip4(inp->inp_cred->cr_prison, cred->cr_prison)) continue; #ifdef INET6 /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV4) == 0) continue; /* * We never select the PCB that has * INP_IPV6 flag and is bound to :: if * we have another PCB which is bound * to 0.0.0.0. If a PCB has the * INP_IPV6 flag, then we set its cost * higher than IPv4 only PCBs. * * Note that the case only happens * when a socket is bound to ::, under * the condition that the use of the * mapped address is allowed. */ if ((inp->inp_vflag & INP_IPV6) != 0) wildcard += INP_LOOKUP_MAPPED_PCB_COST; #endif if (inp->inp_faddr.s_addr != INADDR_ANY) wildcard++; if (inp->inp_laddr.s_addr != INADDR_ANY) { if (laddr.s_addr == INADDR_ANY) wildcard++; else if (inp->inp_laddr.s_addr != laddr.s_addr) continue; } else { if (laddr.s_addr != INADDR_ANY) wildcard++; } if (wildcard < matchwild) { match = inp; matchwild = wildcard; if (matchwild == 0) break; } } } return (match); } } #undef INP_LOOKUP_MAPPED_PCB_COST static bool in_pcblookup_lb_numa_match(const struct inpcblbgroup *grp, int domain) { return (domain == M_NODOM || domain == grp->il_numa_domain); } static struct inpcb * in_pcblookup_lbgroup(const struct inpcbinfo *pcbinfo, const struct in_addr *faddr, uint16_t fport, const struct in_addr *laddr, uint16_t lport, int domain) { const struct inpcblbgrouphead *hdr; struct inpcblbgroup *grp; struct inpcblbgroup *jail_exact, *jail_wild, *local_exact, *local_wild; INP_HASH_LOCK_ASSERT(pcbinfo); hdr = &pcbinfo->ipi_lbgrouphashbase[ INP_PCBPORTHASH(lport, pcbinfo->ipi_lbgrouphashmask)]; /* * Search for an LB group match based on the following criteria: * - prefer jailed groups to non-jailed groups * - prefer exact source address matches to wildcard matches * - prefer groups bound to the specified NUMA domain */ jail_exact = jail_wild = local_exact = local_wild = NULL; CK_LIST_FOREACH(grp, hdr, il_list) { bool injail; #ifdef INET6 if (!(grp->il_vflag & INP_IPV4)) continue; #endif if (grp->il_lport != lport) continue; injail = prison_flag(grp->il_cred, PR_IP4) != 0; if (injail && prison_check_ip4_locked(grp->il_cred->cr_prison, laddr) != 0) continue; if (grp->il_laddr.s_addr == laddr->s_addr) { if (injail) { jail_exact = grp; if (in_pcblookup_lb_numa_match(grp, domain)) /* This is a perfect match. 
*/ goto out; } else if (local_exact == NULL || in_pcblookup_lb_numa_match(grp, domain)) { local_exact = grp; } } else if (grp->il_laddr.s_addr == INADDR_ANY) { if (injail) { if (jail_wild == NULL || in_pcblookup_lb_numa_match(grp, domain)) jail_wild = grp; } else if (local_wild == NULL || in_pcblookup_lb_numa_match(grp, domain)) { local_wild = grp; } } } if (jail_exact != NULL) grp = jail_exact; else if (jail_wild != NULL) grp = jail_wild; else if (local_exact != NULL) grp = local_exact; else grp = local_wild; if (grp == NULL) return (NULL); out: return (grp->il_inp[INP_PCBLBGROUP_PKTHASH(faddr, lport, fport) % grp->il_inpcnt]); } static bool in_pcblookup_exact_match(const struct inpcb *inp, struct in_addr faddr, u_short fport, struct in_addr laddr, u_short lport) { #ifdef INET6 /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV4) == 0) return (false); #endif if (inp->inp_faddr.s_addr == faddr.s_addr && inp->inp_laddr.s_addr == laddr.s_addr && inp->inp_fport == fport && inp->inp_lport == lport) return (true); return (false); } static struct inpcb * in_pcblookup_hash_exact(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_short fport, struct in_addr laddr, u_short lport) { struct inpcbhead *head; struct inpcb *inp; INP_HASH_LOCK_ASSERT(pcbinfo); head = &pcbinfo->ipi_hash_exact[INP_PCBHASH(&faddr, lport, fport, pcbinfo->ipi_hashmask)]; CK_LIST_FOREACH(inp, head, inp_hash_exact) { if (in_pcblookup_exact_match(inp, faddr, fport, laddr, lport)) return (inp); } return (NULL); } typedef enum { INPLOOKUP_MATCH_NONE = 0, INPLOOKUP_MATCH_WILD = 1, INPLOOKUP_MATCH_LADDR = 2, } inp_lookup_match_t; static inp_lookup_match_t in_pcblookup_wild_match(const struct inpcb *inp, struct in_addr laddr, u_short lport) { #ifdef INET6 /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV4) == 0) return (INPLOOKUP_MATCH_NONE); #endif if (inp->inp_faddr.s_addr != INADDR_ANY || inp->inp_lport != lport) return (INPLOOKUP_MATCH_NONE); if (inp->inp_laddr.s_addr == INADDR_ANY) return (INPLOOKUP_MATCH_WILD); if (inp->inp_laddr.s_addr == laddr.s_addr) return (INPLOOKUP_MATCH_LADDR); return (INPLOOKUP_MATCH_NONE); } #define INP_LOOKUP_AGAIN ((struct inpcb *)(uintptr_t)-1) static struct inpcb * in_pcblookup_hash_wild_smr(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_short fport, struct in_addr laddr, u_short lport, const inp_lookup_t lockflags) { struct inpcbhead *head; struct inpcb *inp; KASSERT(SMR_ENTERED(pcbinfo->ipi_smr), ("%s: not in SMR read section", __func__)); head = &pcbinfo->ipi_hash_wild[INP_PCBHASH_WILD(lport, pcbinfo->ipi_hashmask)]; CK_LIST_FOREACH(inp, head, inp_hash_wild) { inp_lookup_match_t match; match = in_pcblookup_wild_match(inp, laddr, lport); if (match == INPLOOKUP_MATCH_NONE) continue; if (__predict_true(inp_smr_lock(inp, lockflags))) { match = in_pcblookup_wild_match(inp, laddr, lport); if (match != INPLOOKUP_MATCH_NONE && prison_check_ip4_locked(inp->inp_cred->cr_prison, &laddr) == 0) return (inp); inp_unlock(inp, lockflags); } /* * The matching socket disappeared out from under us. Fall back * to a serialized lookup. */ return (INP_LOOKUP_AGAIN); } return (NULL); } static struct inpcb * in_pcblookup_hash_wild_locked(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_short fport, struct in_addr laddr, u_short lport) { struct inpcbhead *head; struct inpcb *inp, *local_wild, *local_exact, *jail_wild; #ifdef INET6 struct inpcb *local_wild_mapped; #endif INP_HASH_LOCK_ASSERT(pcbinfo); /* * Order of socket selection - we always prefer jails. * 1. jailed, non-wild. * 2. jailed, wild. * 3. 
non-jailed, non-wild. * 4. non-jailed, wild. */ head = &pcbinfo->ipi_hash_wild[INP_PCBHASH_WILD(lport, pcbinfo->ipi_hashmask)]; local_wild = local_exact = jail_wild = NULL; #ifdef INET6 local_wild_mapped = NULL; #endif CK_LIST_FOREACH(inp, head, inp_hash_wild) { inp_lookup_match_t match; bool injail; match = in_pcblookup_wild_match(inp, laddr, lport); if (match == INPLOOKUP_MATCH_NONE) continue; injail = prison_flag(inp->inp_cred, PR_IP4) != 0; if (injail) { if (prison_check_ip4_locked(inp->inp_cred->cr_prison, &laddr) != 0) continue; } else { if (local_exact != NULL) continue; } if (match == INPLOOKUP_MATCH_LADDR) { if (injail) return (inp); local_exact = inp; } else { #ifdef INET6 /* XXX inp locking, NULL check */ if (inp->inp_vflag & INP_IPV6PROTO) local_wild_mapped = inp; else #endif if (injail) jail_wild = inp; else local_wild = inp; } } if (jail_wild != NULL) return (jail_wild); if (local_exact != NULL) return (local_exact); if (local_wild != NULL) return (local_wild); #ifdef INET6 if (local_wild_mapped != NULL) return (local_wild_mapped); #endif return (NULL); } /* * Lookup PCB in hash list, using pcbinfo tables. This variation assumes * that the caller has either locked the hash list, which usually happens * for bind(2) operations, or is in SMR section, which happens when sorting * out incoming packets. */ static struct inpcb * in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport_arg, struct in_addr laddr, u_int lport_arg, int lookupflags, uint8_t numa_domain) { struct inpcb *inp; const u_short fport = fport_arg, lport = lport_arg; KASSERT((lookupflags & ~INPLOOKUP_WILDCARD) == 0, ("%s: invalid lookup flags %d", __func__, lookupflags)); KASSERT(faddr.s_addr != INADDR_ANY, ("%s: invalid foreign address", __func__)); KASSERT(laddr.s_addr != INADDR_ANY, ("%s: invalid local address", __func__)); INP_HASH_WLOCK_ASSERT(pcbinfo); inp = in_pcblookup_hash_exact(pcbinfo, faddr, fport, laddr, lport); if (inp != NULL) return (inp); if ((lookupflags & INPLOOKUP_WILDCARD) != 0) { inp = in_pcblookup_lbgroup(pcbinfo, &faddr, fport, &laddr, lport, numa_domain); if (inp == NULL) { inp = in_pcblookup_hash_wild_locked(pcbinfo, faddr, fport, laddr, lport); } } return (inp); } static struct inpcb * in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport, struct in_addr laddr, u_int lport, int lookupflags, uint8_t numa_domain) { struct inpcb *inp; const inp_lookup_t lockflags = lookupflags & INPLOOKUP_LOCKMASK; KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0, ("%s: LOCKPCB not set", __func__)); INP_HASH_WLOCK(pcbinfo); inp = in_pcblookup_hash_locked(pcbinfo, faddr, fport, laddr, lport, lookupflags & ~INPLOOKUP_LOCKMASK, numa_domain); if (inp != NULL && !inp_trylock(inp, lockflags)) { in_pcbref(inp); INP_HASH_WUNLOCK(pcbinfo); inp_lock(inp, lockflags); if (in_pcbrele(inp, lockflags)) /* XXX-MJ or retry until we get a negative match? 
*/ inp = NULL; } else { INP_HASH_WUNLOCK(pcbinfo); } return (inp); } static struct inpcb * in_pcblookup_hash_smr(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport_arg, struct in_addr laddr, u_int lport_arg, int lookupflags, uint8_t numa_domain) { struct inpcb *inp; const inp_lookup_t lockflags = lookupflags & INPLOOKUP_LOCKMASK; const u_short fport = fport_arg, lport = lport_arg; KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0, ("%s: invalid lookup flags %d", __func__, lookupflags)); KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0, ("%s: LOCKPCB not set", __func__)); smr_enter(pcbinfo->ipi_smr); inp = in_pcblookup_hash_exact(pcbinfo, faddr, fport, laddr, lport); if (inp != NULL) { if (__predict_true(inp_smr_lock(inp, lockflags))) { /* * Revalidate the 4-tuple, the socket could have been * disconnected. */ if (__predict_true(in_pcblookup_exact_match(inp, faddr, fport, laddr, lport))) return (inp); inp_unlock(inp, lockflags); } /* * We failed to lock the inpcb, or its connection state changed * out from under us. Fall back to a precise search. */ return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport, lookupflags, numa_domain)); } if ((lookupflags & INPLOOKUP_WILDCARD) != 0) { inp = in_pcblookup_lbgroup(pcbinfo, &faddr, fport, &laddr, lport, numa_domain); if (inp != NULL) { if (__predict_true(inp_smr_lock(inp, lockflags))) { if (__predict_true(in_pcblookup_wild_match(inp, laddr, lport) != INPLOOKUP_MATCH_NONE)) return (inp); inp_unlock(inp, lockflags); } inp = INP_LOOKUP_AGAIN; } else { inp = in_pcblookup_hash_wild_smr(pcbinfo, faddr, fport, laddr, lport, lockflags); } if (inp == INP_LOOKUP_AGAIN) { return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport, lookupflags, numa_domain)); } } if (inp == NULL) smr_exit(pcbinfo->ipi_smr); return (inp); } /* * Public inpcb lookup routines, accepting a 4-tuple, and optionally, an mbuf * from which a pre-calculated hash value may be extracted. */ struct inpcb * in_pcblookup(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport, struct in_addr laddr, u_int lport, int lookupflags, struct ifnet *ifp __unused) { return (in_pcblookup_hash_smr(pcbinfo, faddr, fport, laddr, lport, lookupflags, M_NODOM)); } struct inpcb * in_pcblookup_mbuf(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport, struct in_addr laddr, u_int lport, int lookupflags, struct ifnet *ifp __unused, struct mbuf *m) { return (in_pcblookup_hash_smr(pcbinfo, faddr, fport, laddr, lport, lookupflags, m->m_pkthdr.numa_domain)); } #endif /* INET */ static bool in_pcbjailed(const struct inpcb *inp, unsigned int flag) { return (prison_flag(inp->inp_cred, flag) != 0); } /* * Insert the PCB into a hash chain using ordering rules which ensure that * in_pcblookup_hash_wild_*() always encounter the highest-ranking PCB first. * * Specifically, keep jailed PCBs in front of non-jailed PCBs, and keep PCBs * with exact local addresses ahead of wildcard PCBs. Unbound v4-mapped v6 PCBs * always appear last no matter whether they are jailed. 
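 *
 * For example, a fully populated chain is intended to be encountered in
 * roughly this rank order, mirroring the selection order documented in
 * in_pcblookup_hash_wild_locked() above:
 *
 *	1. jailed, bound to a specific local address
 *	2. jailed, wildcard local address
 *	3. non-jailed, bound to a specific local address
 *	4. non-jailed, wildcard local address
 *	5. unbound v4-mapped IPv6 PCBs, jailed or not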
*/ static void _in_pcbinshash_wild(struct inpcbhead *pcbhash, struct inpcb *inp) { struct inpcb *last; bool bound, injail; INP_LOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); last = NULL; bound = inp->inp_laddr.s_addr != INADDR_ANY; if (!bound && (inp->inp_vflag & INP_IPV6PROTO) != 0) { CK_LIST_FOREACH(last, pcbhash, inp_hash_wild) { if (CK_LIST_NEXT(last, inp_hash_wild) == NULL) { CK_LIST_INSERT_AFTER(last, inp, inp_hash_wild); return; } } CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash_wild); return; } injail = in_pcbjailed(inp, PR_IP4); if (!injail) { CK_LIST_FOREACH(last, pcbhash, inp_hash_wild) { if (!in_pcbjailed(last, PR_IP4)) break; if (CK_LIST_NEXT(last, inp_hash_wild) == NULL) { CK_LIST_INSERT_AFTER(last, inp, inp_hash_wild); return; } } } else if (!CK_LIST_EMPTY(pcbhash) && !in_pcbjailed(CK_LIST_FIRST(pcbhash), PR_IP4)) { CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash_wild); return; } if (!bound) { CK_LIST_FOREACH_FROM(last, pcbhash, inp_hash_wild) { if (last->inp_laddr.s_addr == INADDR_ANY) break; if (CK_LIST_NEXT(last, inp_hash_wild) == NULL) { CK_LIST_INSERT_AFTER(last, inp, inp_hash_wild); return; } } } if (last == NULL) CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash_wild); else CK_LIST_INSERT_BEFORE(last, inp, inp_hash_wild); } #ifdef INET6 /* * See the comment above _in_pcbinshash_wild(). */ static void _in6_pcbinshash_wild(struct inpcbhead *pcbhash, struct inpcb *inp) { struct inpcb *last; bool bound, injail; INP_LOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); last = NULL; bound = !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr); injail = in_pcbjailed(inp, PR_IP6); if (!injail) { CK_LIST_FOREACH(last, pcbhash, inp_hash_wild) { if (!in_pcbjailed(last, PR_IP6)) break; if (CK_LIST_NEXT(last, inp_hash_wild) == NULL) { CK_LIST_INSERT_AFTER(last, inp, inp_hash_wild); return; } } } else if (!CK_LIST_EMPTY(pcbhash) && !in_pcbjailed(CK_LIST_FIRST(pcbhash), PR_IP6)) { CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash_wild); return; } if (!bound) { CK_LIST_FOREACH_FROM(last, pcbhash, inp_hash_wild) { if (IN6_IS_ADDR_UNSPECIFIED(&last->in6p_laddr)) break; if (CK_LIST_NEXT(last, inp_hash_wild) == NULL) { CK_LIST_INSERT_AFTER(last, inp, inp_hash_wild); return; } } } if (last == NULL) CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash_wild); else CK_LIST_INSERT_BEFORE(last, inp, inp_hash_wild); } #endif /* * Insert PCB onto various hash lists. */ int in_pcbinshash(struct inpcb *inp) { struct inpcbhead *pcbhash; struct inpcbporthead *pcbporthash; struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; struct inpcbport *phd; uint32_t hash; bool connected; INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(pcbinfo); KASSERT((inp->inp_flags & INP_INHASHLIST) == 0, ("in_pcbinshash: INP_INHASHLIST")); #ifdef INET6 if (inp->inp_vflag & INP_IPV6) { hash = INP6_PCBHASH(&inp->in6p_faddr, inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask); connected = !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr); } else #endif { hash = INP_PCBHASH(&inp->inp_faddr, inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask); connected = !in_nullhost(inp->inp_faddr); } if (connected) pcbhash = &pcbinfo->ipi_hash_exact[hash]; else pcbhash = &pcbinfo->ipi_hash_wild[hash]; pcbporthash = &pcbinfo->ipi_porthashbase[ INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_porthashmask)]; /* * Add entry to load balance group. * Only do this if SO_REUSEPORT_LB is set. 
*/ if ((inp->inp_socket->so_options & SO_REUSEPORT_LB) != 0) { int error = in_pcbinslbgrouphash(inp, M_NODOM); if (error != 0) return (error); } /* * Go through port list and look for a head for this lport. */ CK_LIST_FOREACH(phd, pcbporthash, phd_hash) { if (phd->phd_port == inp->inp_lport) break; } /* * If none exists, malloc one and tack it on. */ if (phd == NULL) { phd = uma_zalloc_smr(pcbinfo->ipi_portzone, M_NOWAIT); if (phd == NULL) { if ((inp->inp_flags & INP_INLBGROUP) != 0) in_pcbremlbgrouphash(inp); return (ENOMEM); } phd->phd_port = inp->inp_lport; CK_LIST_INIT(&phd->phd_pcblist); CK_LIST_INSERT_HEAD(pcbporthash, phd, phd_hash); } inp->inp_phd = phd; CK_LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist); /* * The PCB may have been disconnected in the past. Before we can safely * make it visible in the hash table, we must wait for all readers which * may be traversing this PCB to finish. */ if (inp->inp_smr != SMR_SEQ_INVALID) { smr_wait(pcbinfo->ipi_smr, inp->inp_smr); inp->inp_smr = SMR_SEQ_INVALID; } if (connected) CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash_exact); else { #ifdef INET6 if ((inp->inp_vflag & INP_IPV6) != 0) _in6_pcbinshash_wild(pcbhash, inp); else #endif _in_pcbinshash_wild(pcbhash, inp); } inp->inp_flags |= INP_INHASHLIST; return (0); } void in_pcbremhash_locked(struct inpcb *inp) { struct inpcbport *phd = inp->inp_phd; INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); MPASS(inp->inp_flags & INP_INHASHLIST); if ((inp->inp_flags & INP_INLBGROUP) != 0) in_pcbremlbgrouphash(inp); #ifdef INET6 if (inp->inp_vflag & INP_IPV6) { if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) CK_LIST_REMOVE(inp, inp_hash_wild); else CK_LIST_REMOVE(inp, inp_hash_exact); } else #endif { if (in_nullhost(inp->inp_faddr)) CK_LIST_REMOVE(inp, inp_hash_wild); else CK_LIST_REMOVE(inp, inp_hash_exact); } CK_LIST_REMOVE(inp, inp_portlist); if (CK_LIST_FIRST(&phd->phd_pcblist) == NULL) { CK_LIST_REMOVE(phd, phd_hash); uma_zfree_smr(inp->inp_pcbinfo->ipi_portzone, phd); } inp->inp_flags &= ~INP_INHASHLIST; } static void in_pcbremhash(struct inpcb *inp) { INP_HASH_WLOCK(inp->inp_pcbinfo); in_pcbremhash_locked(inp); INP_HASH_WUNLOCK(inp->inp_pcbinfo); } /* * Move PCB to the proper hash bucket when { faddr, fport } have been * changed. NOTE: This does not handle the case of the lport changing (the * hashed port list would have to be updated as well), so the lport must * not change after in_pcbinshash() has been called. */ void in_pcbrehash(struct inpcb *inp) { struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; struct inpcbhead *head; uint32_t hash; bool connected; INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(pcbinfo); KASSERT(inp->inp_flags & INP_INHASHLIST, ("%s: !INP_INHASHLIST", __func__)); KASSERT(inp->inp_smr == SMR_SEQ_INVALID, ("%s: inp was disconnected", __func__)); #ifdef INET6 if (inp->inp_vflag & INP_IPV6) { hash = INP6_PCBHASH(&inp->in6p_faddr, inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask); connected = !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr); } else #endif { hash = INP_PCBHASH(&inp->inp_faddr, inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask); connected = !in_nullhost(inp->inp_faddr); } /* * When rehashing, the caller must ensure that either the new or the old * foreign address was unspecified. 
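 *
 * For example, in_pcbconnect() above rehashes a PCB that was hashed with
 * a wildcard foreign address once inp_faddr/inp_fport have been filled
 * in, which moves it from the wild to the exact hash chain.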
*/ if (connected) CK_LIST_REMOVE(inp, inp_hash_wild); else CK_LIST_REMOVE(inp, inp_hash_exact); if (connected) { head = &pcbinfo->ipi_hash_exact[hash]; CK_LIST_INSERT_HEAD(head, inp, inp_hash_exact); } else { head = &pcbinfo->ipi_hash_wild[hash]; CK_LIST_INSERT_HEAD(head, inp, inp_hash_wild); } } /* * Check for alternatives when higher level complains * about service problems. For now, invalidate cached * routing information. If the route was created dynamically * (by a redirect), time to try a default gateway again. */ void in_losing(struct inpcb *inp) { RO_INVALIDATE_CACHE(&inp->inp_route); return; } /* * A set label operation has occurred at the socket layer, propagate the * label change into the in_pcb for the socket. */ void in_pcbsosetlabel(struct socket *so) { #ifdef MAC struct inpcb *inp; inp = sotoinpcb(so); KASSERT(inp != NULL, ("in_pcbsosetlabel: so->so_pcb == NULL")); INP_WLOCK(inp); SOCK_LOCK(so); mac_inpcb_sosetlabel(so, inp); SOCK_UNLOCK(so); INP_WUNLOCK(inp); #endif } void inp_wlock(struct inpcb *inp) { INP_WLOCK(inp); } void inp_wunlock(struct inpcb *inp) { INP_WUNLOCK(inp); } void inp_rlock(struct inpcb *inp) { INP_RLOCK(inp); } void inp_runlock(struct inpcb *inp) { INP_RUNLOCK(inp); } #ifdef INVARIANT_SUPPORT void inp_lock_assert(struct inpcb *inp) { INP_WLOCK_ASSERT(inp); } void inp_unlock_assert(struct inpcb *inp) { INP_UNLOCK_ASSERT(inp); } #endif void inp_apply_all(struct inpcbinfo *pcbinfo, void (*func)(struct inpcb *, void *), void *arg) { struct inpcb_iterator inpi = INP_ALL_ITERATOR(pcbinfo, INPLOOKUP_WLOCKPCB); struct inpcb *inp; while ((inp = inp_next(&inpi)) != NULL) func(inp, arg); } struct socket * inp_inpcbtosocket(struct inpcb *inp) { INP_WLOCK_ASSERT(inp); return (inp->inp_socket); } struct tcpcb * inp_inpcbtotcpcb(struct inpcb *inp) { INP_WLOCK_ASSERT(inp); return ((struct tcpcb *)inp->inp_ppcb); } int inp_ip_tos_get(const struct inpcb *inp) { return (inp->inp_ip_tos); } void inp_ip_tos_set(struct inpcb *inp, int val) { inp->inp_ip_tos = val; } void inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp, uint32_t *faddr, uint16_t *fp) { INP_LOCK_ASSERT(inp); *laddr = inp->inp_laddr.s_addr; *faddr = inp->inp_faddr.s_addr; *lp = inp->inp_lport; *fp = inp->inp_fport; } struct inpcb * so_sotoinpcb(struct socket *so) { return (sotoinpcb(so)); } /* * Create an external-format (``xinpcb'') structure using the information in * the kernel-format in_pcb structure pointed to by inp. This is done to * reduce the spew of irrelevant information over this interface, to isolate * user code from changes in the kernel structure, and potentially to provide * information-hiding if we decide that some of this information should be * hidden from users. 
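 *
 * A monitoring interface would typically fill in one xinpcb per PCB and
 * copy it out to userland along these lines (an illustrative sketch;
 * SYSCTL_OUT() is the generic sysctl copy-out helper, not defined here):
 *
 *	struct xinpcb xi;
 *
 *	in_pcbtoxinpcb(inp, &xi);
 *	error = SYSCTL_OUT(req, &xi, sizeof(xi));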
*/ void in_pcbtoxinpcb(const struct inpcb *inp, struct xinpcb *xi) { bzero(xi, sizeof(*xi)); xi->xi_len = sizeof(struct xinpcb); if (inp->inp_socket) sotoxsocket(inp->inp_socket, &xi->xi_socket); bcopy(&inp->inp_inc, &xi->inp_inc, sizeof(struct in_conninfo)); xi->inp_gencnt = inp->inp_gencnt; xi->inp_ppcb = (uintptr_t)inp->inp_ppcb; xi->inp_flow = inp->inp_flow; xi->inp_flowid = inp->inp_flowid; xi->inp_flowtype = inp->inp_flowtype; xi->inp_flags = inp->inp_flags; xi->inp_flags2 = inp->inp_flags2; xi->in6p_cksum = inp->in6p_cksum; xi->in6p_hops = inp->in6p_hops; xi->inp_ip_tos = inp->inp_ip_tos; xi->inp_vflag = inp->inp_vflag; xi->inp_ip_ttl = inp->inp_ip_ttl; xi->inp_ip_p = inp->inp_ip_p; xi->inp_ip_minttl = inp->inp_ip_minttl; } int sysctl_setsockopt(SYSCTL_HANDLER_ARGS, struct inpcbinfo *pcbinfo, int (*ctloutput_set)(struct inpcb *, struct sockopt *)) { struct sockopt sopt; struct inpcb_iterator inpi = INP_ALL_ITERATOR(pcbinfo, INPLOOKUP_WLOCKPCB); struct inpcb *inp; struct sockopt_parameters *params; struct socket *so; int error; char buf[1024]; if (req->oldptr != NULL || req->oldlen != 0) return (EINVAL); if (req->newptr == NULL) return (EPERM); if (req->newlen > sizeof(buf)) return (ENOMEM); error = SYSCTL_IN(req, buf, req->newlen); if (error != 0) return (error); if (req->newlen < sizeof(struct sockopt_parameters)) return (EINVAL); params = (struct sockopt_parameters *)buf; sopt.sopt_level = params->sop_level; sopt.sopt_name = params->sop_optname; sopt.sopt_dir = SOPT_SET; sopt.sopt_val = params->sop_optval; sopt.sopt_valsize = req->newlen - sizeof(struct sockopt_parameters); sopt.sopt_td = NULL; #ifdef INET6 if (params->sop_inc.inc_flags & INC_ISIPV6) { if (IN6_IS_SCOPE_LINKLOCAL(¶ms->sop_inc.inc6_laddr)) params->sop_inc.inc6_laddr.s6_addr16[1] = htons(params->sop_inc.inc6_zoneid & 0xffff); if (IN6_IS_SCOPE_LINKLOCAL(¶ms->sop_inc.inc6_faddr)) params->sop_inc.inc6_faddr.s6_addr16[1] = htons(params->sop_inc.inc6_zoneid & 0xffff); } #endif if (params->sop_inc.inc_lport != htons(0)) { if (params->sop_inc.inc_fport == htons(0)) inpi.hash = INP_PCBHASH_WILD(params->sop_inc.inc_lport, pcbinfo->ipi_hashmask); else #ifdef INET6 if (params->sop_inc.inc_flags & INC_ISIPV6) inpi.hash = INP6_PCBHASH( ¶ms->sop_inc.inc6_faddr, params->sop_inc.inc_lport, params->sop_inc.inc_fport, pcbinfo->ipi_hashmask); else #endif inpi.hash = INP_PCBHASH( ¶ms->sop_inc.inc_faddr, params->sop_inc.inc_lport, params->sop_inc.inc_fport, pcbinfo->ipi_hashmask); } while ((inp = inp_next(&inpi)) != NULL) if (inp->inp_gencnt == params->sop_id) { if (inp->inp_flags & INP_DROPPED) { INP_WUNLOCK(inp); return (ECONNRESET); } so = inp->inp_socket; KASSERT(so != NULL, ("inp_socket == NULL")); soref(so); error = (*ctloutput_set)(inp, &sopt); sorele(so); break; } if (inp == NULL) error = ESRCH; return (error); } #ifdef DDB static void db_print_indent(int indent) { int i; for (i = 0; i < indent; i++) db_printf(" "); } static void db_print_inconninfo(struct in_conninfo *inc, const char *name, int indent) { char faddr_str[48], laddr_str[48]; db_print_indent(indent); db_printf("%s at %p\n", name, inc); indent += 2; #ifdef INET6 if (inc->inc_flags & INC_ISIPV6) { /* IPv6. */ ip6_sprintf(laddr_str, &inc->inc6_laddr); ip6_sprintf(faddr_str, &inc->inc6_faddr); } else #endif { /* IPv4. 
*/ inet_ntoa_r(inc->inc_laddr, laddr_str); inet_ntoa_r(inc->inc_faddr, faddr_str); } db_print_indent(indent); db_printf("inc_laddr %s inc_lport %u\n", laddr_str, ntohs(inc->inc_lport)); db_print_indent(indent); db_printf("inc_faddr %s inc_fport %u\n", faddr_str, ntohs(inc->inc_fport)); } static void db_print_inpflags(int inp_flags) { int comma; comma = 0; if (inp_flags & INP_RECVOPTS) { db_printf("%sINP_RECVOPTS", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_RECVRETOPTS) { db_printf("%sINP_RECVRETOPTS", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_RECVDSTADDR) { db_printf("%sINP_RECVDSTADDR", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_ORIGDSTADDR) { db_printf("%sINP_ORIGDSTADDR", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_HDRINCL) { db_printf("%sINP_HDRINCL", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_HIGHPORT) { db_printf("%sINP_HIGHPORT", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_LOWPORT) { db_printf("%sINP_LOWPORT", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_ANONPORT) { db_printf("%sINP_ANONPORT", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_RECVIF) { db_printf("%sINP_RECVIF", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_MTUDISC) { db_printf("%sINP_MTUDISC", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_RECVTTL) { db_printf("%sINP_RECVTTL", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_DONTFRAG) { db_printf("%sINP_DONTFRAG", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_RECVTOS) { db_printf("%sINP_RECVTOS", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_IPV6_V6ONLY) { db_printf("%sIN6P_IPV6_V6ONLY", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_PKTINFO) { db_printf("%sIN6P_PKTINFO", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_HOPLIMIT) { db_printf("%sIN6P_HOPLIMIT", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_HOPOPTS) { db_printf("%sIN6P_HOPOPTS", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_DSTOPTS) { db_printf("%sIN6P_DSTOPTS", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_RTHDR) { db_printf("%sIN6P_RTHDR", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_RTHDRDSTOPTS) { db_printf("%sIN6P_RTHDRDSTOPTS", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_TCLASS) { db_printf("%sIN6P_TCLASS", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_AUTOFLOWLABEL) { db_printf("%sIN6P_AUTOFLOWLABEL", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_ONESBCAST) { db_printf("%sINP_ONESBCAST", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_DROPPED) { db_printf("%sINP_DROPPED", comma ? ", " : ""); comma = 1; } if (inp_flags & INP_SOCKREF) { db_printf("%sINP_SOCKREF", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_RFC2292) { db_printf("%sIN6P_RFC2292", comma ? ", " : ""); comma = 1; } if (inp_flags & IN6P_MTU) { db_printf("IN6P_MTU%s", comma ? ", " : ""); comma = 1; } } static void db_print_inpvflag(u_char inp_vflag) { int comma; comma = 0; if (inp_vflag & INP_IPV4) { db_printf("%sINP_IPV4", comma ? ", " : ""); comma = 1; } if (inp_vflag & INP_IPV6) { db_printf("%sINP_IPV6", comma ? ", " : ""); comma = 1; } if (inp_vflag & INP_IPV6PROTO) { db_printf("%sINP_IPV6PROTO", comma ? 
", " : ""); comma = 1; } } static void db_print_inpcb(struct inpcb *inp, const char *name, int indent) { db_print_indent(indent); db_printf("%s at %p\n", name, inp); indent += 2; db_print_indent(indent); db_printf("inp_flow: 0x%x\n", inp->inp_flow); db_print_inconninfo(&inp->inp_inc, "inp_conninfo", indent); db_print_indent(indent); db_printf("inp_ppcb: %p inp_pcbinfo: %p inp_socket: %p\n", inp->inp_ppcb, inp->inp_pcbinfo, inp->inp_socket); db_print_indent(indent); db_printf("inp_label: %p inp_flags: 0x%x (", inp->inp_label, inp->inp_flags); db_print_inpflags(inp->inp_flags); db_printf(")\n"); db_print_indent(indent); db_printf("inp_sp: %p inp_vflag: 0x%x (", inp->inp_sp, inp->inp_vflag); db_print_inpvflag(inp->inp_vflag); db_printf(")\n"); db_print_indent(indent); db_printf("inp_ip_ttl: %d inp_ip_p: %d inp_ip_minttl: %d\n", inp->inp_ip_ttl, inp->inp_ip_p, inp->inp_ip_minttl); db_print_indent(indent); #ifdef INET6 if (inp->inp_vflag & INP_IPV6) { db_printf("in6p_options: %p in6p_outputopts: %p " "in6p_moptions: %p\n", inp->in6p_options, inp->in6p_outputopts, inp->in6p_moptions); db_printf("in6p_icmp6filt: %p in6p_cksum %d " "in6p_hops %u\n", inp->in6p_icmp6filt, inp->in6p_cksum, inp->in6p_hops); } else #endif { db_printf("inp_ip_tos: %d inp_ip_options: %p " "inp_ip_moptions: %p\n", inp->inp_ip_tos, inp->inp_options, inp->inp_moptions); } db_print_indent(indent); db_printf("inp_phd: %p inp_gencnt: %ju\n", inp->inp_phd, (uintmax_t)inp->inp_gencnt); } DB_SHOW_COMMAND(inpcb, db_show_inpcb) { struct inpcb *inp; if (!have_addr) { db_printf("usage: show inpcb \n"); return; } inp = (struct inpcb *)addr; db_print_inpcb(inp, "inpcb", 0); } #endif /* DDB */ #ifdef RATELIMIT /* * Modify TX rate limit based on the existing "inp->inp_snd_tag", * if any. */ int in_pcbmodify_txrtlmt(struct inpcb *inp, uint32_t max_pacing_rate) { union if_snd_tag_modify_params params = { .rate_limit.max_rate = max_pacing_rate, .rate_limit.flags = M_NOWAIT, }; struct m_snd_tag *mst; int error; mst = inp->inp_snd_tag; if (mst == NULL) return (EINVAL); if (mst->sw->snd_tag_modify == NULL) { error = EOPNOTSUPP; } else { error = mst->sw->snd_tag_modify(mst, ¶ms); } return (error); } /* * Query existing TX rate limit based on the existing * "inp->inp_snd_tag", if any. */ int in_pcbquery_txrtlmt(struct inpcb *inp, uint32_t *p_max_pacing_rate) { union if_snd_tag_query_params params = { }; struct m_snd_tag *mst; int error; mst = inp->inp_snd_tag; if (mst == NULL) return (EINVAL); if (mst->sw->snd_tag_query == NULL) { error = EOPNOTSUPP; } else { error = mst->sw->snd_tag_query(mst, ¶ms); if (error == 0 && p_max_pacing_rate != NULL) *p_max_pacing_rate = params.rate_limit.max_rate; } return (error); } /* * Query existing TX queue level based on the existing * "inp->inp_snd_tag", if any. 
*/ int in_pcbquery_txrlevel(struct inpcb *inp, uint32_t *p_txqueue_level) { union if_snd_tag_query_params params = { }; struct m_snd_tag *mst; int error; mst = inp->inp_snd_tag; if (mst == NULL) return (EINVAL); if (mst->sw->snd_tag_query == NULL) return (EOPNOTSUPP); error = mst->sw->snd_tag_query(mst, ¶ms); if (error == 0 && p_txqueue_level != NULL) *p_txqueue_level = params.rate_limit.queue_level; return (error); } /* * Allocate a new TX rate limit send tag from the network interface * given by the "ifp" argument and save it in "inp->inp_snd_tag": */ int in_pcbattach_txrtlmt(struct inpcb *inp, struct ifnet *ifp, uint32_t flowtype, uint32_t flowid, uint32_t max_pacing_rate, struct m_snd_tag **st) { union if_snd_tag_alloc_params params = { .rate_limit.hdr.type = (max_pacing_rate == -1U) ? IF_SND_TAG_TYPE_UNLIMITED : IF_SND_TAG_TYPE_RATE_LIMIT, .rate_limit.hdr.flowid = flowid, .rate_limit.hdr.flowtype = flowtype, .rate_limit.hdr.numa_domain = inp->inp_numa_domain, .rate_limit.max_rate = max_pacing_rate, .rate_limit.flags = M_NOWAIT, }; int error; INP_WLOCK_ASSERT(inp); /* * If there is already a send tag, or the INP is being torn * down, allocating a new send tag is not allowed. Else send * tags may leak. */ if (*st != NULL || (inp->inp_flags & INP_DROPPED) != 0) return (EINVAL); error = m_snd_tag_alloc(ifp, ¶ms, st); #ifdef INET if (error == 0) { counter_u64_add(rate_limit_set_ok, 1); counter_u64_add(rate_limit_active, 1); } else if (error != EOPNOTSUPP) counter_u64_add(rate_limit_alloc_fail, 1); #endif return (error); } void in_pcbdetach_tag(struct m_snd_tag *mst) { m_snd_tag_rele(mst); #ifdef INET counter_u64_add(rate_limit_active, -1); #endif } /* * Free an existing TX rate limit tag based on the "inp->inp_snd_tag", * if any: */ void in_pcbdetach_txrtlmt(struct inpcb *inp) { struct m_snd_tag *mst; INP_WLOCK_ASSERT(inp); mst = inp->inp_snd_tag; inp->inp_snd_tag = NULL; if (mst == NULL) return; m_snd_tag_rele(mst); #ifdef INET counter_u64_add(rate_limit_active, -1); #endif } int in_pcboutput_txrtlmt_locked(struct inpcb *inp, struct ifnet *ifp, struct mbuf *mb, uint32_t max_pacing_rate) { int error; /* * If the existing send tag is for the wrong interface due to * a route change, first drop the existing tag. Set the * CHANGED flag so that we will keep trying to allocate a new * tag if we fail to allocate one this time. */ if (inp->inp_snd_tag != NULL && inp->inp_snd_tag->ifp != ifp) { in_pcbdetach_txrtlmt(inp); inp->inp_flags2 |= INP_RATE_LIMIT_CHANGED; } /* * NOTE: When attaching to a network interface a reference is * made to ensure the network interface doesn't go away until * all ratelimit connections are gone. The network interface * pointers compared below represent valid network interfaces, * except when comparing towards NULL. 
*/ if (max_pacing_rate == 0 && inp->inp_snd_tag == NULL) { error = 0; } else if (!(ifp->if_capenable & IFCAP_TXRTLMT)) { if (inp->inp_snd_tag != NULL) in_pcbdetach_txrtlmt(inp); error = 0; } else if (inp->inp_snd_tag == NULL) { /* * In order to utilize packet pacing with RSS, we need * to wait until there is a valid RSS hash before we * can proceed: */ if (M_HASHTYPE_GET(mb) == M_HASHTYPE_NONE) { error = EAGAIN; } else { error = in_pcbattach_txrtlmt(inp, ifp, M_HASHTYPE_GET(mb), mb->m_pkthdr.flowid, max_pacing_rate, &inp->inp_snd_tag); } } else { error = in_pcbmodify_txrtlmt(inp, max_pacing_rate); } if (error == 0 || error == EOPNOTSUPP) inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED; return (error); } /* * This function should be called when the INP_RATE_LIMIT_CHANGED flag * is set in the fast path and will attach/detach/modify the TX rate * limit send tag based on the socket's so_max_pacing_rate value. */ void in_pcboutput_txrtlmt(struct inpcb *inp, struct ifnet *ifp, struct mbuf *mb) { struct socket *socket; uint32_t max_pacing_rate; bool did_upgrade; if (inp == NULL) return; socket = inp->inp_socket; if (socket == NULL) return; if (!INP_WLOCKED(inp)) { /* * NOTE: If the write locking fails, we need to bail * out and use the non-ratelimited ring for the * transmit until there is a new chance to get the * write lock. */ if (!INP_TRY_UPGRADE(inp)) return; did_upgrade = 1; } else { did_upgrade = 0; } /* * NOTE: The so_max_pacing_rate value is read unlocked, * because atomic updates are not required since the variable * is checked at every mbuf we send. It is assumed that the * variable read itself will be atomic. */ max_pacing_rate = socket->so_max_pacing_rate; in_pcboutput_txrtlmt_locked(inp, ifp, mb, max_pacing_rate); if (did_upgrade) INP_DOWNGRADE(inp); } /* * Track route changes for TX rate limiting. */ void in_pcboutput_eagain(struct inpcb *inp) { bool did_upgrade; if (inp == NULL) return; if (inp->inp_snd_tag == NULL) return; if (!INP_WLOCKED(inp)) { /* * NOTE: If the write locking fails, we need to bail * out and use the non-ratelimited ring for the * transmit until there is a new chance to get the * write lock. */ if (!INP_TRY_UPGRADE(inp)) return; did_upgrade = 1; } else { did_upgrade = 0; } /* detach rate limiting */ in_pcbdetach_txrtlmt(inp); /* make sure new mbuf send tag allocation is made */ inp->inp_flags2 |= INP_RATE_LIMIT_CHANGED; if (did_upgrade) INP_DOWNGRADE(inp); } #ifdef INET static void rl_init(void *st) { rate_limit_new = counter_u64_alloc(M_WAITOK); rate_limit_chg = counter_u64_alloc(M_WAITOK); rate_limit_active = counter_u64_alloc(M_WAITOK); rate_limit_alloc_fail = counter_u64_alloc(M_WAITOK); rate_limit_set_ok = counter_u64_alloc(M_WAITOK); } SYSINIT(rl, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, rl_init, NULL); #endif #endif /* RATELIMIT */ diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h index d2cfd7f53700..adde89dde873 100644 --- a/sys/netinet/in_pcb.h +++ b/sys/netinet/in_pcb.h @@ -1,745 +1,743 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1990, 1993 * The Regents of the University of California. * Copyright (c) 2010-2011 Juniper Networks, Inc. * All rights reserved. * * Portions of this software were developed by Robert N. M. Watson under * contract to Juniper Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _NETINET_IN_PCB_H_ #define _NETINET_IN_PCB_H_ #include #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #include #include #endif #include /* * struct inpcb is the common protocol control block structure used in most * IP transport protocols. * * Pointers to local and foreign host table entries, local and foreign socket * numbers, and pointers up (to a socket structure) and down (to a * protocol-specific control block) are stored here. */ CK_LIST_HEAD(inpcbhead, inpcb); CK_LIST_HEAD(inpcbporthead, inpcbport); CK_LIST_HEAD(inpcblbgrouphead, inpcblbgroup); typedef uint64_t inp_gen_t; /* * PCB with AF_INET6 null bind'ed laddr can receive AF_INET input packet. * So, AF_INET6 null laddr is also used as AF_INET null laddr, by utilizing * the following structure. This requires padding always be zeroed out, * which is done right after inpcb allocation and stays through its lifetime. */ struct in_addr_4in6 { u_int32_t ia46_pad32[3]; struct in_addr ia46_addr4; }; union in_dependaddr { struct in_addr_4in6 id46_addr; struct in6_addr id6_addr; }; /* * NOTE: ipv6 addrs should be 64-bit aligned, per RFC 2553. in_conninfo has * some extra padding to accomplish this. * NOTE 2: tcp_syncache.c uses first 5 32-bit words, which identify fport, * lport, faddr to generate hash, so these fields shouldn't be moved. */ struct in_endpoints { u_int16_t ie_fport; /* foreign port */ u_int16_t ie_lport; /* local port */ /* protocol dependent part, local and foreign addr */ union in_dependaddr ie_dependfaddr; /* foreign host table entry */ union in_dependaddr ie_dependladdr; /* local host table entry */ #define ie_faddr ie_dependfaddr.id46_addr.ia46_addr4 #define ie_laddr ie_dependladdr.id46_addr.ia46_addr4 #define ie6_faddr ie_dependfaddr.id6_addr #define ie6_laddr ie_dependladdr.id6_addr u_int32_t ie6_zoneid; /* scope zone id */ }; /* * XXX The defines for inc_* are hacks and should be changed to direct * references. */ struct in_conninfo { u_int8_t inc_flags; u_int8_t inc_len; u_int16_t inc_fibnum; /* XXX was pad, 16 bits is plenty */ /* protocol dependent part */ struct in_endpoints inc_ie; }; /* * Flags for inc_flags. 
*/ #define INC_ISIPV6 0x01 #define INC_IPV6MINMTU 0x02 #define inc_fport inc_ie.ie_fport #define inc_lport inc_ie.ie_lport #define inc_faddr inc_ie.ie_faddr #define inc_laddr inc_ie.ie_laddr #define inc6_faddr inc_ie.ie6_faddr #define inc6_laddr inc_ie.ie6_laddr #define inc6_zoneid inc_ie.ie6_zoneid #if defined(_KERNEL) || defined(_WANT_INPCB) /* * struct inpcb captures the network layer state for TCP, UDP, and raw IPv4 and * IPv6 sockets. In the case of TCP and UDP, further per-connection state is * hung off of inp_ppcb most of the time. Almost all fields of struct inpcb * are static after creation or protected by a per-inpcb rwlock, inp_lock. * * A inpcb database is indexed by addresses/ports hash as well as list of * all pcbs that belong to a certain proto. Database lookups or list traversals * are be performed inside SMR section. Once desired PCB is found its own * lock is to be obtained and SMR section exited. * * Key: * (c) - Constant after initialization * (e) - Protected by the SMR section * (i) - Protected by the inpcb lock * (p) - Protected by the pcbinfo lock for the inpcb * (h) - Protected by the pcbhash lock for the inpcb * (s) - Protected by another subsystem's locks * (x) - Undefined locking * * A few other notes: * * When a read lock is held, stability of the field is guaranteed; to write * to a field, a write lock must generally be held. * * netinet/netinet6-layer code should not assume that the inp_socket pointer * is safe to dereference without inp_lock being held, there may be * close(2)-related races. * * The inp_vflag field is overloaded, and would otherwise ideally be (c). */ struct icmp6_filter; struct inpcbpolicy; struct m_snd_tag; struct inpcb { /* Cache line #1 (amd64) */ CK_LIST_ENTRY(inpcb) inp_hash_exact; /* hash table linkage */ CK_LIST_ENTRY(inpcb) inp_hash_wild; /* hash table linkage */ struct rwlock inp_lock; /* Cache line #2 (amd64) */ #define inp_start_zero inp_refcount #define inp_zero_size (sizeof(struct inpcb) - \ offsetof(struct inpcb, inp_start_zero)) u_int inp_refcount; /* (i) refcount */ int inp_flags; /* (i) generic IP/datagram flags */ int inp_flags2; /* (i) generic IP/datagram flags #2*/ uint8_t inp_numa_domain; /* numa domain */ void *inp_ppcb; /* (i) pointer to per-protocol pcb */ struct socket *inp_socket; /* (i) back pointer to socket */ struct inpcbinfo *inp_pcbinfo; /* (c) PCB list info */ struct ucred *inp_cred; /* (c) cache of socket cred */ u_int32_t inp_flow; /* (i) IPv6 flow information */ u_char inp_vflag; /* (i) IP version flag (v4/v6) */ u_char inp_ip_ttl; /* (i) time to live proto */ u_char inp_ip_p; /* (c) protocol proto */ u_char inp_ip_minttl; /* (i) minimum TTL or drop */ uint32_t inp_flowid; /* (x) flow id / queue id */ smr_seq_t inp_smr; /* (i) sequence number at disconnect */ struct m_snd_tag *inp_snd_tag; /* (i) send tag for outgoing mbufs */ uint32_t inp_flowtype; /* (x) M_HASHTYPE value */ /* Local and foreign ports, local and foreign addr. */ struct in_conninfo inp_inc; /* (i,h) list for PCB's local port */ /* MAC and IPSEC policy information. */ struct label *inp_label; /* (i) MAC label */ struct inpcbpolicy *inp_sp; /* (s) for IPSEC */ /* Protocol-dependent part; options. 
*/ struct { u_char inp_ip_tos; /* (i) type of service proto */ struct mbuf *inp_options; /* (i) IP options */ struct ip_moptions *inp_moptions; /* (i) mcast options */ }; struct { /* (i) IP options */ struct mbuf *in6p_options; /* (i) IP6 options for outgoing packets */ struct ip6_pktopts *in6p_outputopts; /* (i) IP multicast options */ struct ip6_moptions *in6p_moptions; /* (i) ICMPv6 code type filter */ struct icmp6_filter *in6p_icmp6filt; /* (i) IPV6_CHECKSUM setsockopt */ int in6p_cksum; short in6p_hops; }; CK_LIST_ENTRY(inpcb) inp_portlist; /* (r:e/w:h) port list */ struct inpcbport *inp_phd; /* (r:e/w:h) head of this list */ inp_gen_t inp_gencnt; /* (c) generation count */ void *spare_ptr; /* Spare pointer. */ rt_gen_t inp_rt_cookie; /* generation for route entry */ union { /* cached L3 information */ struct route inp_route; struct route_in6 inp_route6; }; CK_LIST_ENTRY(inpcb) inp_list; /* (r:e/w:p) all PCBs for proto */ }; #endif /* _KERNEL */ #define inp_fport inp_inc.inc_fport #define inp_lport inp_inc.inc_lport #define inp_faddr inp_inc.inc_faddr #define inp_laddr inp_inc.inc_laddr #define in6p_faddr inp_inc.inc6_faddr #define in6p_laddr inp_inc.inc6_laddr #define in6p_zoneid inp_inc.inc6_zoneid #define inp_vnet inp_pcbinfo->ipi_vnet /* * The range of the generation count, as used in this implementation, is 9e19. * We would have to create 300 billion connections per second for this number * to roll over in a year. This seems sufficiently unlikely that we simply * don't concern ourselves with that possibility. */ /* * Interface exported to userland by various protocols which use inpcbs. Hack * alert -- only define if struct xsocket is in scope. * Fields prefixed with "xi_" are unique to this structure, and the rest * match fields in the struct inpcb, to ease coding and porting. * * Legend: * (s) - used by userland utilities in src * (p) - used by utilities in ports * (3) - is known to be used by third party software not in ports * (n) - no known usage */ #ifdef _SYS_SOCKETVAR_H_ struct xinpcb { ksize_t xi_len; /* length of this structure */ struct xsocket xi_socket; /* (s,p) */ struct in_conninfo inp_inc; /* (s,p) */ uint64_t inp_gencnt; /* (s,p) */ kvaddr_t inp_ppcb; /* (s) netstat(1) */ int64_t inp_spare64[4]; uint32_t inp_flow; /* (s) */ uint32_t inp_flowid; /* (s) */ uint32_t inp_flowtype; /* (s) */ int32_t inp_flags; /* (s,p) */ int32_t inp_flags2; /* (s) */ uint32_t inp_unused; int32_t in6p_cksum; /* (n) */ int32_t inp_spare32[4]; uint16_t in6p_hops; /* (n) */ uint8_t inp_ip_tos; /* (n) */ int8_t pad8; uint8_t inp_vflag; /* (s,p) */ uint8_t inp_ip_ttl; /* (n) */ uint8_t inp_ip_p; /* (n) */ uint8_t inp_ip_minttl; /* (n) */ int8_t inp_spare8[4]; } __aligned(8); struct xinpgen { ksize_t xig_len; /* length of this structure */ u_int xig_count; /* number of PCBs at this time */ uint32_t _xig_spare32; inp_gen_t xig_gen; /* generation count at this time */ so_gen_t xig_sogen; /* socket generation count this time */ uint64_t _xig_spare64[4]; } __aligned(8); struct sockopt_parameters { struct in_conninfo sop_inc; uint64_t sop_id; int sop_level; int sop_optname; char sop_optval[]; }; #ifdef _KERNEL int sysctl_setsockopt(SYSCTL_HANDLER_ARGS, struct inpcbinfo *pcbinfo, int (*ctloutput_set)(struct inpcb *, struct sockopt *)); void in_pcbtoxinpcb(const struct inpcb *, struct xinpcb *); #endif #endif /* _SYS_SOCKETVAR_H_ */ #ifdef _KERNEL /* * Per-VNET pcb database for each high-level protocol (UDP, TCP, ...) in both * IPv4 and IPv6. 
* * The pcbs are protected with SMR section and thus all lists in inpcbinfo * are CK-lists. Locking is required to insert a pcb into database. Two * locks are provided: one for the hash and one for the global list of pcbs, * as well as overall count and generation count. * * Locking key: * * (c) Constant or nearly constant after initialisation * (e) Protected by SMR section * (g) Locked by ipi_lock * (h) Locked by ipi_hash_lock */ struct inpcbinfo { /* * Global lock protecting inpcb list modification */ struct mtx ipi_lock; struct inpcbhead ipi_listhead; /* (r:e/w:g) */ u_int ipi_count; /* (g) */ /* * Generation count -- incremented each time a connection is allocated * or freed. */ u_quad_t ipi_gencnt; /* (g) */ /* * Fields associated with port lookup and allocation. */ u_short ipi_lastport; /* (h) */ u_short ipi_lastlow; /* (h) */ u_short ipi_lasthi; /* (h) */ /* * UMA zone from which inpcbs are allocated for this protocol. */ uma_zone_t ipi_zone; /* (c) */ uma_zone_t ipi_portzone; /* (c) */ smr_t ipi_smr; /* (c) */ /* * Global hash of inpcbs, hashed by local and foreign addresses and * port numbers. The "exact" hash holds PCBs connected to a foreign * address, and "wild" holds the rest. */ struct mtx ipi_hash_lock; struct inpcbhead *ipi_hash_exact; /* (r:e/w:h) */ struct inpcbhead *ipi_hash_wild; /* (r:e/w:h) */ u_long ipi_hashmask; /* (c) */ /* * Global hash of inpcbs, hashed by only local port number. */ struct inpcbporthead *ipi_porthashbase; /* (h) */ u_long ipi_porthashmask; /* (h) */ /* * Load balance groups used for the SO_REUSEPORT_LB option, * hashed by local port. */ struct inpcblbgrouphead *ipi_lbgrouphashbase; /* (r:e/w:h) */ u_long ipi_lbgrouphashmask; /* (h) */ /* * Pointer to network stack instance */ struct vnet *ipi_vnet; /* (c) */ }; /* * Global allocation storage for each high-level protocol (UDP, TCP, ...). * Each corresponding per-VNET inpcbinfo points into this one. */ struct inpcbstorage { uma_zone_t ips_zone; uma_zone_t ips_portzone; uma_init ips_pcbinit; size_t ips_size; const char * ips_zone_name; const char * ips_portzone_name; const char * ips_infolock_name; const char * ips_hashlock_name; }; #define INPCBSTORAGE_DEFINE(prot, ppcb, lname, zname, iname, hname) \ static int \ prot##_inpcb_init(void *mem, int size __unused, int flags __unused) \ { \ struct inpcb *inp = mem; \ \ rw_init_flags(&inp->inp_lock, lname, RW_RECURSE | RW_DUPOK); \ return (0); \ } \ static struct inpcbstorage prot = { \ .ips_size = sizeof(struct ppcb), \ .ips_pcbinit = prot##_inpcb_init, \ .ips_zone_name = zname, \ .ips_portzone_name = zname " ports", \ .ips_infolock_name = iname, \ .ips_hashlock_name = hname, \ }; \ SYSINIT(prot##_inpcbstorage_init, SI_SUB_PROTO_DOMAIN, \ SI_ORDER_SECOND, in_pcbstorage_init, &prot); \ SYSUNINIT(prot##_inpcbstorage_uninit, SI_SUB_PROTO_DOMAIN, \ SI_ORDER_SECOND, in_pcbstorage_destroy, &prot) /* * Load balance groups used for the SO_REUSEPORT_LB socket option. Each group * (or unique address:port combination) can be re-used at most * INPCBLBGROUP_SIZMAX (256) times. The inpcbs are stored in il_inp which * is dynamically resized as processes bind/unbind to that specific group. 
*/ struct inpcblbgroup { CK_LIST_ENTRY(inpcblbgroup) il_list; struct epoch_context il_epoch_ctx; struct ucred *il_cred; uint16_t il_lport; /* (c) */ u_char il_vflag; /* (c) */ uint8_t il_numa_domain; uint32_t il_pad2; union in_dependaddr il_dependladdr; /* (c) */ #define il_laddr il_dependladdr.id46_addr.ia46_addr4 #define il6_laddr il_dependladdr.id6_addr uint32_t il_inpsiz; /* max count in il_inp[] (h) */ uint32_t il_inpcnt; /* cur count in il_inp[] (h) */ struct inpcb *il_inp[]; /* (h) */ }; #define INP_LOCK_DESTROY(inp) rw_destroy(&(inp)->inp_lock) #define INP_RLOCK(inp) rw_rlock(&(inp)->inp_lock) #define INP_WLOCK(inp) rw_wlock(&(inp)->inp_lock) #define INP_TRY_RLOCK(inp) rw_try_rlock(&(inp)->inp_lock) #define INP_TRY_WLOCK(inp) rw_try_wlock(&(inp)->inp_lock) #define INP_RUNLOCK(inp) rw_runlock(&(inp)->inp_lock) #define INP_WUNLOCK(inp) rw_wunlock(&(inp)->inp_lock) #define INP_UNLOCK(inp) rw_unlock(&(inp)->inp_lock) #define INP_TRY_UPGRADE(inp) rw_try_upgrade(&(inp)->inp_lock) #define INP_DOWNGRADE(inp) rw_downgrade(&(inp)->inp_lock) #define INP_WLOCKED(inp) rw_wowned(&(inp)->inp_lock) #define INP_LOCK_ASSERT(inp) rw_assert(&(inp)->inp_lock, RA_LOCKED) #define INP_RLOCK_ASSERT(inp) rw_assert(&(inp)->inp_lock, RA_RLOCKED) #define INP_WLOCK_ASSERT(inp) rw_assert(&(inp)->inp_lock, RA_WLOCKED) #define INP_UNLOCK_ASSERT(inp) rw_assert(&(inp)->inp_lock, RA_UNLOCKED) /* * These locking functions are for inpcb consumers outside of sys/netinet, * more specifically, they were added for the benefit of TOE drivers. The * macros are reserved for use by the stack. */ void inp_wlock(struct inpcb *); void inp_wunlock(struct inpcb *); void inp_rlock(struct inpcb *); void inp_runlock(struct inpcb *); #ifdef INVARIANT_SUPPORT void inp_lock_assert(struct inpcb *); void inp_unlock_assert(struct inpcb *); #else #define inp_lock_assert(inp) do {} while (0) #define inp_unlock_assert(inp) do {} while (0) #endif void inp_apply_all(struct inpcbinfo *, void (*func)(struct inpcb *, void *), void *arg); int inp_ip_tos_get(const struct inpcb *inp); void inp_ip_tos_set(struct inpcb *inp, int val); struct socket * inp_inpcbtosocket(struct inpcb *inp); struct tcpcb * inp_inpcbtotcpcb(struct inpcb *inp); void inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp, uint32_t *faddr, uint16_t *fp); #endif /* _KERNEL */ #define INP_INFO_WLOCK(ipi) mtx_lock(&(ipi)->ipi_lock) #define INP_INFO_WLOCKED(ipi) mtx_owned(&(ipi)->ipi_lock) #define INP_INFO_WUNLOCK(ipi) mtx_unlock(&(ipi)->ipi_lock) #define INP_INFO_LOCK_ASSERT(ipi) MPASS(SMR_ENTERED((ipi)->ipi_smr) || \ mtx_owned(&(ipi)->ipi_lock)) #define INP_INFO_WLOCK_ASSERT(ipi) mtx_assert(&(ipi)->ipi_lock, MA_OWNED) #define INP_INFO_WUNLOCK_ASSERT(ipi) \ mtx_assert(&(ipi)->ipi_lock, MA_NOTOWNED) #define INP_HASH_WLOCK(ipi) mtx_lock(&(ipi)->ipi_hash_lock) #define INP_HASH_WUNLOCK(ipi) mtx_unlock(&(ipi)->ipi_hash_lock) #define INP_HASH_LOCK_ASSERT(ipi) MPASS(SMR_ENTERED((ipi)->ipi_smr) || \ mtx_owned(&(ipi)->ipi_hash_lock)) #define INP_HASH_WLOCK_ASSERT(ipi) mtx_assert(&(ipi)->ipi_hash_lock, \ MA_OWNED) /* * Wildcard matching hash is not just a microoptimisation! The hash for * wildcard IPv4 and wildcard IPv6 must be the same, otherwise AF_INET6 * wildcard bound pcb won't be able to receive AF_INET connections, while: * jenkins_hash(&zeroes, 1, s) != jenkins_hash(&zeroes, 4, s) * See also comment above struct in_addr_4in6. */ #define IN_ADDR_JHASH32(addr) \ ((addr)->s_addr == INADDR_ANY ? 
V_in_pcbhashseed : \ jenkins_hash32((&(addr)->s_addr), 1, V_in_pcbhashseed)) #define IN6_ADDR_JHASH32(addr) \ (memcmp((addr), &in6addr_any, sizeof(in6addr_any)) == 0 ? \ V_in_pcbhashseed : \ jenkins_hash32((addr)->__u6_addr.__u6_addr32, \ nitems((addr)->__u6_addr.__u6_addr32), V_in_pcbhashseed)) #define INP_PCBHASH(faddr, lport, fport, mask) \ ((IN_ADDR_JHASH32(faddr) ^ ntohs((lport) ^ (fport))) & (mask)) #define INP6_PCBHASH(faddr, lport, fport, mask) \ ((IN6_ADDR_JHASH32(faddr) ^ ntohs((lport) ^ (fport))) & (mask)) #define INP_PCBHASH_WILD(lport, mask) \ ((V_in_pcbhashseed ^ ntohs(lport)) & (mask)) #define INP_PCBLBGROUP_PKTHASH(faddr, lport, fport) \ (IN_ADDR_JHASH32(faddr) ^ ntohs((lport) ^ (fport))) #define INP6_PCBLBGROUP_PKTHASH(faddr, lport, fport) \ (IN6_ADDR_JHASH32(faddr) ^ ntohs((lport) ^ (fport))) #define INP_PCBPORTHASH(lport, mask) (ntohs((lport)) & (mask)) /* * Flags for inp_vflags -- historically version flags only */ #define INP_IPV4 0x1 #define INP_IPV6 0x2 #define INP_IPV6PROTO 0x4 /* opened under IPv6 protocol */ /* * Flags for inp_flags. */ #define INP_RECVOPTS 0x00000001 /* receive incoming IP options */ #define INP_RECVRETOPTS 0x00000002 /* receive IP options for reply */ #define INP_RECVDSTADDR 0x00000004 /* receive IP dst address */ #define INP_HDRINCL 0x00000008 /* user supplies entire IP header */ #define INP_HIGHPORT 0x00000010 /* user wants "high" port binding */ #define INP_LOWPORT 0x00000020 /* user wants "low" port binding */ #define INP_ANONPORT 0x00000040 /* read by netstat(1) */ #define INP_RECVIF 0x00000080 /* receive incoming interface */ #define INP_MTUDISC 0x00000100 /* user can do MTU discovery */ /* INP_FREED 0x00000200 private to in_pcb.c */ #define INP_RECVTTL 0x00000400 /* receive incoming IP TTL */ #define INP_DONTFRAG 0x00000800 /* don't fragment packet */ #define INP_BINDANY 0x00001000 /* allow bind to any address */ #define INP_INHASHLIST 0x00002000 /* in_pcbinshash() has been called */ #define INP_RECVTOS 0x00004000 /* receive incoming IP TOS */ #define IN6P_IPV6_V6ONLY 0x00008000 /* restrict AF_INET6 socket for v6 */ #define IN6P_PKTINFO 0x00010000 /* receive IP6 dst and I/F */ #define IN6P_HOPLIMIT 0x00020000 /* receive hoplimit */ #define IN6P_HOPOPTS 0x00040000 /* receive hop-by-hop options */ #define IN6P_DSTOPTS 0x00080000 /* receive dst options after rthdr */ #define IN6P_RTHDR 0x00100000 /* receive routing header */ #define IN6P_RTHDRDSTOPTS 0x00200000 /* receive dstoptions before rthdr */ #define IN6P_TCLASS 0x00400000 /* receive traffic class value */ #define IN6P_AUTOFLOWLABEL 0x00800000 /* attach flowlabel automatically */ /* INP_INLBGROUP 0x01000000 private to in_pcb.c */ #define INP_ONESBCAST 0x02000000 /* send all-ones broadcast */ #define INP_DROPPED 0x04000000 /* protocol drop flag */ #define INP_SOCKREF 0x08000000 /* strong socket reference */ #define INP_RESERVED_0 0x10000000 /* reserved field */ #define INP_RESERVED_1 0x20000000 /* reserved field */ #define IN6P_RFC2292 0x40000000 /* used RFC2292 API on the socket */ #define IN6P_MTU 0x80000000 /* receive path MTU */ #define INP_CONTROLOPTS (INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|\ INP_RECVIF|INP_RECVTTL|INP_RECVTOS|\ IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\ IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS|\ IN6P_TCLASS|IN6P_AUTOFLOWLABEL|IN6P_RFC2292|\ IN6P_MTU) /* * Flags for inp_flags2. 
*/ /* 0x00000001 */ /* 0x00000002 */ /* 0x00000004 */ /* 0x00000008 */ /* 0x00000010 */ /* 0x00000020 */ /* 0x00000040 */ /* 0x00000080 */ #define INP_RECVFLOWID 0x00000100 /* populate recv datagram with flow info */ #define INP_RECVRSSBUCKETID 0x00000200 /* populate recv datagram with bucket id */ #define INP_RATE_LIMIT_CHANGED 0x00000400 /* rate limit needs attention */ #define INP_ORIGDSTADDR 0x00000800 /* receive IP dst address/port */ /* 0x00001000 */ /* 0x00002000 */ /* 0x00004000 */ /* 0x00008000 */ /* 0x00010000 */ #define INP_2PCP_SET 0x00020000 /* If the Eth PCP should be set explicitly */ #define INP_2PCP_BIT0 0x00040000 /* Eth PCP Bit 0 */ #define INP_2PCP_BIT1 0x00080000 /* Eth PCP Bit 1 */ #define INP_2PCP_BIT2 0x00100000 /* Eth PCP Bit 2 */ #define INP_2PCP_BASE INP_2PCP_BIT0 #define INP_2PCP_MASK (INP_2PCP_BIT0 | INP_2PCP_BIT1 | INP_2PCP_BIT2) #define INP_2PCP_SHIFT 18 /* shift PCP field in/out of inp_flags2 */ /* * Flags passed to in_pcblookup*(), inp_smr_lock() and inp_next(). */ typedef enum { INPLOOKUP_WILDCARD = 0x00000001, /* Allow wildcard sockets. */ INPLOOKUP_RLOCKPCB = 0x00000002, /* Return inpcb read-locked. */ INPLOOKUP_WLOCKPCB = 0x00000004, /* Return inpcb write-locked. */ } inp_lookup_t; #define INPLOOKUP_MASK (INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB | \ INPLOOKUP_WLOCKPCB) #define INPLOOKUP_LOCKMASK (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB) #define sotoinpcb(so) ((struct inpcb *)(so)->so_pcb) #define INP_SOCKAF(so) so->so_proto->pr_domain->dom_family #define INP_CHECK_SOCKAF(so, af) (INP_SOCKAF(so) == af) #ifdef _KERNEL VNET_DECLARE(int, ipport_reservedhigh); VNET_DECLARE(int, ipport_reservedlow); VNET_DECLARE(int, ipport_lowfirstauto); VNET_DECLARE(int, ipport_lowlastauto); VNET_DECLARE(int, ipport_firstauto); VNET_DECLARE(int, ipport_lastauto); VNET_DECLARE(int, ipport_hifirstauto); VNET_DECLARE(int, ipport_hilastauto); VNET_DECLARE(int, ipport_randomized); #define V_ipport_reservedhigh VNET(ipport_reservedhigh) #define V_ipport_reservedlow VNET(ipport_reservedlow) #define V_ipport_lowfirstauto VNET(ipport_lowfirstauto) #define V_ipport_lowlastauto VNET(ipport_lowlastauto) #define V_ipport_firstauto VNET(ipport_firstauto) #define V_ipport_lastauto VNET(ipport_lastauto) #define V_ipport_hifirstauto VNET(ipport_hifirstauto) #define V_ipport_hilastauto VNET(ipport_hilastauto) #define V_ipport_randomized VNET(ipport_randomized) void in_pcbinfo_init(struct inpcbinfo *, struct inpcbstorage *, u_int, u_int); void in_pcbinfo_destroy(struct inpcbinfo *); void in_pcbstorage_init(void *); void in_pcbstorage_destroy(void *); void in_pcbpurgeif0(struct inpcbinfo *, struct ifnet *); int in_pcballoc(struct socket *, struct inpcbinfo *); int in_pcbbind(struct inpcb *, struct sockaddr_in *, struct ucred *); int in_pcbbind_setup(struct inpcb *, struct sockaddr_in *, in_addr_t *, u_short *, struct ucred *); int in_pcbconnect(struct inpcb *, struct sockaddr_in *, struct ucred *, bool); int in_pcbconnect_setup(struct inpcb *, struct sockaddr_in *, in_addr_t *, u_short *, in_addr_t *, u_short *, struct ucred *); void in_pcbdetach(struct inpcb *); void in_pcbdisconnect(struct inpcb *); void in_pcbdrop(struct inpcb *); void in_pcbfree(struct inpcb *); int in_pcbinshash(struct inpcb *); int in_pcbladdr(struct inpcb *, struct in_addr *, struct in_addr *, struct ucred *); int in_pcblbgroup_numa(struct inpcb *, int arg); struct inpcb * in_pcblookup(struct inpcbinfo *, struct in_addr, u_int, struct in_addr, u_int, int, struct ifnet *); struct inpcb * in_pcblookup_mbuf(struct inpcbinfo 
*, struct in_addr, u_int, struct in_addr, u_int, int, struct ifnet *, struct mbuf *); void in_pcbref(struct inpcb *); void in_pcbrehash(struct inpcb *); void in_pcbremhash_locked(struct inpcb *); bool in_pcbrele(struct inpcb *, inp_lookup_t); bool in_pcbrele_rlocked(struct inpcb *); bool in_pcbrele_wlocked(struct inpcb *); typedef bool inp_match_t(const struct inpcb *, void *); struct inpcb_iterator { const struct inpcbinfo *ipi; struct inpcb *inp; inp_match_t *match; void *ctx; int hash; #define INP_ALL_LIST -1 const inp_lookup_t lock; }; /* Note: sparse initializers guarantee .inp = NULL. */ #define INP_ITERATOR(_ipi, _lock, _match, _ctx) \ { \ .ipi = (_ipi), \ .lock = (_lock), \ .hash = INP_ALL_LIST, \ .match = (_match), \ .ctx = (_ctx), \ } #define INP_ALL_ITERATOR(_ipi, _lock) \ { \ .ipi = (_ipi), \ .lock = (_lock), \ .hash = INP_ALL_LIST, \ } struct inpcb *inp_next(struct inpcb_iterator *); void in_losing(struct inpcb *); void in_pcbsetsolabel(struct socket *so); -int in_getpeeraddr(struct socket *so, struct sockaddr **nam); -int in_getsockaddr(struct socket *so, struct sockaddr **nam); -struct sockaddr * - in_sockaddr(in_port_t port, struct in_addr *addr); +int in_getpeeraddr(struct socket *, struct sockaddr *sa); +int in_getsockaddr(struct socket *, struct sockaddr *sa); void in_pcbsosetlabel(struct socket *so); #ifdef RATELIMIT int in_pcboutput_txrtlmt_locked(struct inpcb *, struct ifnet *, struct mbuf *, uint32_t); int in_pcbattach_txrtlmt(struct inpcb *, struct ifnet *, uint32_t, uint32_t, uint32_t, struct m_snd_tag **); void in_pcbdetach_txrtlmt(struct inpcb *); void in_pcbdetach_tag(struct m_snd_tag *); int in_pcbmodify_txrtlmt(struct inpcb *, uint32_t); int in_pcbquery_txrtlmt(struct inpcb *, uint32_t *); int in_pcbquery_txrlevel(struct inpcb *, uint32_t *); void in_pcboutput_txrtlmt(struct inpcb *, struct ifnet *, struct mbuf *); void in_pcboutput_eagain(struct inpcb *); #endif #endif /* _KERNEL */ #endif /* !_NETINET_IN_PCB_H_ */ diff --git a/sys/netinet/sctp_os.h b/sys/netinet/sctp_os.h index 42f000dc8d55..78926a3d1017 100644 --- a/sys/netinet/sctp_os.h +++ b/sys/netinet/sctp_os.h @@ -1,71 +1,68 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _NETINET_SCTP_OS_H_ #define _NETINET_SCTP_OS_H_ /* * General kernel memory allocation: * SCTP_MALLOC(element, type, size, name) * SCTP_FREE(element) - * Kernel memory allocation for "soname"- memory must be zeroed. - * SCTP_MALLOC_SONAME(name, type, size) - * SCTP_FREE_SONAME(name) */ /* * Zone(pool) allocation routines: MUST be defined for each OS. * zone = zone/pool pointer. * name = string name of the zone/pool. * size = size of each zone/pool element. * number = number of elements in zone/pool. * type = structure type to allocate * * sctp_zone_t * SCTP_ZONE_INIT(zone, name, size, number) * SCTP_ZONE_GET(zone, type) * SCTP_ZONE_FREE(zone, element) * SCTP_ZONE_DESTROY(zone) */ #include /* All os's must implement this address gatherer. If * no VRF's exist, then vrf 0 is the only one and all * addresses and ifn's live here. */ #define SCTP_DEFAULT_VRF 0 void sctp_init_vrf_list(int vrfid); #endif diff --git a/sys/netinet/sctp_os_bsd.h b/sys/netinet/sctp_os_bsd.h index 7bc0d20b9360..eb0caec942e9 100644 --- a/sys/netinet/sctp_os_bsd.h +++ b/sys/netinet/sctp_os_bsd.h @@ -1,491 +1,484 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _NETINET_SCTP_OS_BSD_H_ #define _NETINET_SCTP_OS_BSD_H_ #include #include "opt_inet6.h" #include "opt_inet.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #include #include #include #endif /* INET6 */ #include #include #include /* Declare all the malloc names for all the various mallocs */ MALLOC_DECLARE(SCTP_M_MAP); MALLOC_DECLARE(SCTP_M_STRMI); MALLOC_DECLARE(SCTP_M_STRMO); MALLOC_DECLARE(SCTP_M_ASC_ADDR); MALLOC_DECLARE(SCTP_M_ASC_IT); MALLOC_DECLARE(SCTP_M_AUTH_CL); MALLOC_DECLARE(SCTP_M_AUTH_KY); MALLOC_DECLARE(SCTP_M_AUTH_HL); MALLOC_DECLARE(SCTP_M_AUTH_IF); MALLOC_DECLARE(SCTP_M_STRESET); MALLOC_DECLARE(SCTP_M_CMSG); MALLOC_DECLARE(SCTP_M_COPYAL); MALLOC_DECLARE(SCTP_M_VRF); MALLOC_DECLARE(SCTP_M_IFA); MALLOC_DECLARE(SCTP_M_IFN); MALLOC_DECLARE(SCTP_M_TIMW); MALLOC_DECLARE(SCTP_M_MVRF); MALLOC_DECLARE(SCTP_M_ITER); MALLOC_DECLARE(SCTP_M_SOCKOPT); MALLOC_DECLARE(SCTP_M_MCORE); #if defined(SCTP_LOCAL_TRACE_BUF) #define SCTP_GET_CYCLECOUNT get_cyclecount() #define SCTP_CTR6 sctp_log_trace #else #define SCTP_CTR6 CTR6 #endif /* * Macros to expand out globals defined by various modules * to either a real global or a virtualized instance of one, * depending on whether VIMAGE is defined. */ /* then define the macro(s) that hook into the vimage macros */ #define MODULE_GLOBAL(__SYMBOL) V_##__SYMBOL #define V_system_base_info VNET(system_base_info) #define SCTP_BASE_INFO(__m) V_system_base_info.sctppcbinfo.__m #define SCTP_BASE_STATS V_system_base_info.sctpstat #define SCTP_BASE_STAT(__m) V_system_base_info.sctpstat.__m #define SCTP_BASE_SYSCTL(__m) V_system_base_info.sctpsysctl.__m #define SCTP_BASE_VAR(__m) V_system_base_info.__m #define SCTP_PRINTF(params...) printf(params) #if defined(SCTP_DEBUG) #define SCTPDBG(level, params...) \ { \ do { \ if (SCTP_BASE_SYSCTL(sctp_debug_on) & level ) { \ SCTP_PRINTF(params); \ } \ } while (0); \ } #define SCTPDBG_ADDR(level, addr) \ { \ do { \ if (SCTP_BASE_SYSCTL(sctp_debug_on) & level ) { \ sctp_print_address(addr); \ } \ } while (0); \ } #else #define SCTPDBG(level, params...) 
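/*
 * Illustrative sketch only: SCTPDBG() either expands to a conditional
 * SCTP_PRINTF() (when SCTP_DEBUG is compiled in) or to nothing, as in
 * the stub above.  A hypothetical call site, using one of the
 * SCTP_DEBUG_* level masks and invented variable names, would look
 * roughly like:
 *
 *	SCTPDBG(SCTP_DEBUG_PCB1, "inp %p bound to port %u\n",
 *	    (void *)inp, (unsigned int)port);
 */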
#define SCTPDBG_ADDR(level, addr) #endif #ifdef SCTP_LTRACE_CHUNKS #define SCTP_LTRACE_CHK(a, b, c, d) if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_CHUNK_ENABLE) SCTP_CTR6(KTR_SUBSYS, "SCTP:%d[%d]:%x-%x-%x-%x", SCTP_LOG_CHUNK_PROC, 0, a, b, c, d) #else #define SCTP_LTRACE_CHK(a, b, c, d) #endif #ifdef SCTP_LTRACE_ERRORS #define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err) \ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_ERROR_ENABLE) \ SCTP_PRINTF("mbuf:%p inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \ m, inp, stcb, net, file, __LINE__, err); #define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err) \ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_ERROR_ENABLE) \ SCTP_PRINTF("inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \ inp, stcb, net, file, __LINE__, err); #else #define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err) #define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err) #endif /* * Local address and interface list handling */ #define SCTP_MAX_VRF_ID 0 #define SCTP_SIZE_OF_VRF_HASH 3 #define SCTP_IFNAMSIZ IFNAMSIZ #define SCTP_DEFAULT_VRFID 0 #define SCTP_VRF_ADDR_HASH_SIZE 16 #define SCTP_VRF_IFN_HASH_SIZE 3 #define SCTP_INIT_VRF_TABLEID(vrf) #define SCTP_IFN_IS_IFT_LOOP(ifn) ((ifn)->ifn_type == IFT_LOOP) #define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_nh && (ro)->ro_nh->nh_ifa && (ro)->ro_nh->nh_ifa->ifa_ifp && (ro)->ro_nh->nh_ifa->ifa_ifp->if_type == IFT_LOOP) /* * Access to IFN's to help with src-addr-selection */ /* This could return VOID if the index works but for BSD we provide both. */ #define SCTP_GET_IFN_VOID_FROM_ROUTE(ro) (void *)ro->ro_nh->nh_ifp #define SCTP_GET_IF_INDEX_FROM_ROUTE(ro) (ro)->ro_nh->nh_ifp->if_index #define SCTP_ROUTE_HAS_VALID_IFN(ro) ((ro)->ro_nh && (ro)->ro_nh->nh_ifp) /* * general memory allocation */ #define SCTP_MALLOC(var, type, size, name) \ do { \ var = (type)malloc(size, name, M_NOWAIT); \ } while (0) #define SCTP_FREE(var, type) free(var, type) -#define SCTP_MALLOC_SONAME(var, type, size) \ - do { \ - var = (type)malloc(size, M_SONAME, M_WAITOK | M_ZERO); \ - } while (0) - -#define SCTP_FREE_SONAME(var) free(var, M_SONAME) - #define SCTP_PROCESS_STRUCT struct proc * /* * zone allocation functions */ #include /* SCTP_ZONE_INIT: initialize the zone */ typedef struct uma_zone *sctp_zone_t; #define SCTP_ZONE_INIT(zone, name, size, number) { \ zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,\ 0); \ uma_zone_set_max(zone, number); \ } #define SCTP_ZONE_DESTROY(zone) uma_zdestroy(zone) /* SCTP_ZONE_GET: allocate element from the zone */ #define SCTP_ZONE_GET(zone, type) \ (type *)uma_zalloc(zone, M_NOWAIT); /* SCTP_ZONE_FREE: free element from the zone */ #define SCTP_ZONE_FREE(zone, element) \ uma_zfree(zone, element); #define SCTP_HASH_INIT(size, hashmark) hashinit_flags(size, M_PCB, hashmark, HASH_NOWAIT) #define SCTP_HASH_FREE(table, hashmark) hashdestroy(table, M_PCB, hashmark) #define SCTP_M_COPYM m_copym /* * timers */ #include typedef struct callout sctp_os_timer_t; #define SCTP_OS_TIMER_INIT(tmr) callout_init(tmr, 1) /* * NOTE: The next two shouldn't be called directly outside of sctp_timer_start() * and sctp_timer_stop(), since they don't handle incrementing/decrementing * relevant reference counts. 
*/ #define SCTP_OS_TIMER_START callout_reset #define SCTP_OS_TIMER_STOP callout_stop #define SCTP_OS_TIMER_STOP_DRAIN callout_drain #define SCTP_OS_TIMER_PENDING callout_pending #define SCTP_OS_TIMER_ACTIVE callout_active #define SCTP_OS_TIMER_DEACTIVATE callout_deactivate #define sctp_get_tick_count() (ticks) #define SCTP_UNUSED __attribute__((unused)) /* * Functions */ /* Mbuf manipulation and access macros */ #define SCTP_BUF_LEN(m) (m->m_len) #define SCTP_BUF_NEXT(m) (m->m_next) #define SCTP_BUF_NEXT_PKT(m) (m->m_nextpkt) #define SCTP_BUF_RESV_UF(m, size) m->m_data += size #define SCTP_BUF_AT(m, size) m->m_data + size #define SCTP_BUF_IS_EXTENDED(m) (m->m_flags & M_EXT) #define SCTP_BUF_SIZE M_SIZE #define SCTP_BUF_TYPE(m) (m->m_type) #define SCTP_BUF_RECVIF(m) (m->m_pkthdr.rcvif) #define SCTP_BUF_PREPEND M_PREPEND #define SCTP_ALIGN_TO_END(m, len) M_ALIGN(m, len) #define SCTP_SNPRINTF(...) snprintf(__VA_ARGS__) /* We make it so if you have up to 4 threads * writing based on the default size of * the packet log 65 k, that would be * 4 16k packets before we would hit * a problem. */ #define SCTP_PKTLOG_WRITERS_NEED_LOCK 3 /*************************/ /* MTU */ /*************************/ #define SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index) ((ifn != NULL) ? ((struct ifnet *)ifn)->if_mtu : 0) #define SCTP_GATHER_MTU_FROM_ROUTE(sctp_ifa, sa, nh) ((uint32_t)((nh != NULL) ? nh->nh_mtu : 0)) /*************************/ /* These are for logging */ /*************************/ /* return the base ext data pointer */ #define SCTP_BUF_EXTEND_BASE(m) (m->m_ext.ext_buf) /* return the refcnt of the data pointer */ #define SCTP_BUF_EXTEND_REFCNT(m) (*m->m_ext.ext_cnt) /* return any buffer related flags, this is * used beyond logging for apple only. */ #define SCTP_BUF_GET_FLAGS(m) (m->m_flags) /* For BSD this just accesses the M_PKTHDR length * so it operates on an mbuf with hdr flag. Other * O/S's may have separate packet header and mbuf * chain pointers.. thus the macro. */ #define SCTP_HEADER_TO_CHAIN(m) (m) #define SCTP_DETACH_HEADER_FROM_CHAIN(m) #define SCTP_HEADER_LEN(m) ((m)->m_pkthdr.len) #define SCTP_GET_HEADER_FOR_OUTPUT(o_pak) 0 #define SCTP_RELEASE_HEADER(m) #define SCTP_RELEASE_PKT(m) sctp_m_freem(m) #define SCTP_ENABLE_UDP_CSUM(m) do { \ m->m_pkthdr.csum_flags = CSUM_UDP; \ m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); \ } while (0) #define SCTP_GET_PKT_VRFID(m, vrf_id) ((vrf_id = SCTP_DEFAULT_VRFID) != SCTP_DEFAULT_VRFID) /* Attach the chain of data into the sendable packet. */ #define SCTP_ATTACH_CHAIN(pak, m, packet_length) do { \ pak = m; \ pak->m_pkthdr.len = packet_length; \ } while(0) /* Other m_pkthdr type things */ #define SCTP_IS_IT_BROADCAST(dst, m) ((m->m_flags & M_PKTHDR) ? in_broadcast(dst, m->m_pkthdr.rcvif) : 0) #define SCTP_IS_IT_LOOPBACK(m) ((m->m_flags & M_PKTHDR) && ((m->m_pkthdr.rcvif == NULL) || (m->m_pkthdr.rcvif->if_type == IFT_LOOP))) /* This converts any input packet header * into the chain of data holders, for BSD * its a NOP. */ /* get the v6 hop limit */ #define SCTP_GET_HLIM(inp, ro) in6_selecthlim(&inp->ip_inp.inp, (ro ? (ro->ro_nh ? (ro->ro_nh->nh_ifp) : (NULL)) : (NULL))); /* is the endpoint v6only? */ #define SCTP_IPV6_V6ONLY(sctp_inpcb) ((sctp_inpcb)->ip_inp.inp.inp_flags & IN6P_IPV6_V6ONLY) /* is the socket non-blocking? 
*/ #define SCTP_SO_IS_NBIO(so) ((so)->so_state & SS_NBIO) #define SCTP_SET_SO_NBIO(so) ((so)->so_state |= SS_NBIO) #define SCTP_CLEAR_SO_NBIO(so) ((so)->so_state &= ~SS_NBIO) /* get the socket type */ #define SCTP_SO_TYPE(so) ((so)->so_type) /* reserve sb space for a socket */ #define SCTP_SORESERVE(so, send, recv) soreserve(so, send, recv) /* wakeup a socket */ #define SCTP_SOWAKEUP(so) wakeup(&(so)->so_timeo) /* number of bytes ready to read */ #define SCTP_SBAVAIL(sb) sbavail(sb) /* clear the socket buffer state */ #define SCTP_SB_INCR(sb, incr) \ { \ atomic_add_int(&(sb)->sb_acc, incr); \ atomic_add_int(&(sb)->sb_ccc, incr); \ } #define SCTP_SB_DECR(sb, decr) \ { \ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_acc, decr); \ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_ccc, decr); \ } #define SCTP_SB_CLEAR(sb) \ (sb).sb_acc = 0; \ (sb).sb_ccc = 0; \ (sb).sb_mb = NULL; \ (sb).sb_mbcnt = 0; #define SCTP_SB_LIMIT_RCV(so) (SOLISTENING(so) ? so->sol_sbrcv_hiwat : so->so_rcv.sb_hiwat) #define SCTP_SB_LIMIT_SND(so) (SOLISTENING(so) ? so->sol_sbsnd_hiwat : so->so_snd.sb_hiwat) /* * routes, output, etc. */ typedef struct route sctp_route_t; #define SCTP_RTALLOC(ro, vrf_id, fibnum) \ { \ if ((ro)->ro_nh == NULL) { \ (ro)->ro_nh = rib_lookup(fibnum, &(ro)->ro_dst, NHR_REF, 0); \ } \ } /* * SCTP protocol specific mbuf flags. */ #define M_NOTIFICATION M_PROTO1 /* SCTP notification */ /* * IP output routines */ #define SCTP_IP_OUTPUT(result, o_pak, ro, _inp, vrf_id) \ { \ struct sctp_inpcb *local_inp = _inp; \ int o_flgs = IP_RAWOUTPUT; \ \ m_clrprotoflags(o_pak); \ if ((local_inp != NULL) && (local_inp->sctp_socket != NULL)) { \ o_flgs |= local_inp->sctp_socket->so_options & SO_DONTROUTE; \ } \ result = ip_output(o_pak, NULL, ro, o_flgs, 0, NULL); \ } #define SCTP_IP6_OUTPUT(result, o_pak, ro, ifp, _inp, vrf_id) \ { \ struct sctp_inpcb *local_inp = _inp; \ \ m_clrprotoflags(o_pak); \ if (local_inp != NULL) { \ INP_RLOCK(&local_inp->ip_inp.inp); \ result = ip6_output(o_pak, \ local_inp->ip_inp.inp.in6p_outputopts, \ (ro), 0, 0, ifp, NULL); \ INP_RUNLOCK(&local_inp->ip_inp.inp); \ } else { \ result = ip6_output(o_pak, NULL, (ro), 0, 0, ifp, NULL); \ } \ } struct mbuf * sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int allonebuf, int type); /* * SCTP AUTH */ #define SCTP_READ_RANDOM(buf, len) arc4rand(buf, len, 0) /* map standard crypto API names */ #define SCTP_SHA1_CTX SHA1_CTX #define SCTP_SHA1_INIT SHA1Init #define SCTP_SHA1_UPDATE SHA1Update #define SCTP_SHA1_FINAL(x,y) SHA1Final((caddr_t)x, y) #define SCTP_SHA256_CTX SHA256_CTX #define SCTP_SHA256_INIT SHA256_Init #define SCTP_SHA256_UPDATE SHA256_Update #define SCTP_SHA256_FINAL(x,y) SHA256_Final((caddr_t)x, y) #define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1) #if defined(INVARIANTS) #define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \ { \ int32_t oldval; \ oldval = atomic_fetchadd_int(addr, -val); \ if (oldval < val) { \ panic("Counter goes negative"); \ } \ } #else #define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \ { \ int32_t oldval; \ oldval = atomic_fetchadd_int(addr, -val); \ if (oldval < val) { \ *addr = 0; \ } \ } #endif #define SCTP_IS_LISTENING(inp) ((inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) != 0) int sctp_syscalls_init(void); int sctp_syscalls_uninit(void); #endif diff --git a/sys/netinet/sctp_usrreq.c b/sys/netinet/sctp_usrreq.c index 8fb96db84f95..7fa4559108fd 100644 --- a/sys/netinet/sctp_usrreq.c +++ b/sys/netinet/sctp_usrreq.c @@ -1,7557 +1,7547 @@ /*- * SPDX-License-Identifier: 
BSD-3-Clause * * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #ifdef INET6 #include #endif #include #include #include #include #include #include #include #include #include #include #include extern const struct sctp_cc_functions sctp_cc_functions[]; extern const struct sctp_ss_functions sctp_ss_functions[]; static void sctp_init(void *arg SCTP_UNUSED) { u_long sb_max_adj; /* Initialize and modify the sysctled variables */ sctp_init_sysctls(); if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8); /* * Allow a user to take no more than 1/2 the number of clusters or * the SB_MAX, whichever is smaller, for the send window. */ sb_max_adj = (u_long)((u_quad_t)(SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj, (((uint32_t)nmbclusters / 2) * MCLBYTES)); /* * Now for the recv window, should we take the same amount? or * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For * now I will just copy. 
*/ SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace); SCTP_BASE_VAR(first_time) = 0; SCTP_BASE_VAR(sctp_pcb_initialized) = 0; sctp_pcb_init(); #if defined(SCTP_PACKET_LOGGING) SCTP_BASE_VAR(packet_log_writers) = 0; SCTP_BASE_VAR(packet_log_end) = 0; memset(&SCTP_BASE_VAR(packet_log_buffer), 0, SCTP_PACKET_LOG_SIZE); #endif SCTP_BASE_VAR(eh_tag) = EVENTHANDLER_REGISTER(rt_addrmsg, sctp_addr_change_event_handler, NULL, EVENTHANDLER_PRI_FIRST); } VNET_SYSINIT(sctp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, sctp_init, NULL); #ifdef VIMAGE static void sctp_finish(void *unused __unused) { EVENTHANDLER_DEREGISTER(rt_addrmsg, SCTP_BASE_VAR(eh_tag)); sctp_pcb_finish(); } VNET_SYSUNINIT(sctp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, sctp_finish, NULL); #endif void sctp_pathmtu_adjustment(struct sctp_tcb *stcb, uint32_t mtu, bool resend) { struct sctp_association *asoc; struct sctp_tmit_chunk *chk; uint32_t overhead; asoc = &stcb->asoc; KASSERT(mtu < asoc->smallest_mtu, ("Currently only reducing association MTU %u supported (MTU %u)", asoc->smallest_mtu, mtu)); asoc->smallest_mtu = mtu; if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { overhead = SCTP_MIN_OVERHEAD; } else { overhead = SCTP_MIN_V4_OVERHEAD; } if (asoc->idata_supported) { if (sctp_auth_is_required_chunk(SCTP_IDATA, asoc->peer_auth_chunks)) { overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id); } } else { if (sctp_auth_is_required_chunk(SCTP_DATA, asoc->peer_auth_chunks)) { overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id); } } KASSERT(overhead % 4 == 0, ("overhead (%u) not a multiple of 4", overhead)); TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { if (((uint32_t)chk->send_size + overhead) > mtu) { chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; } } TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { if (((uint32_t)chk->send_size + overhead) > mtu) { chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; if (resend && chk->sent < SCTP_DATAGRAM_RESEND) { /* * If requested, mark the chunk for * immediate resend, since we sent it being * too big. */ sctp_flight_size_decrease(chk); sctp_total_flight_decrease(stcb, chk); chk->sent = SCTP_DATAGRAM_RESEND; sctp_ucount_incr(asoc->sent_queue_retran_cnt); chk->rec.data.doing_fast_retransmit = 0; if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU, chk->whoTo->flight_size, chk->book_size, (uint32_t)(uintptr_t)chk->whoTo, chk->rec.data.tsn); } /* Clear any time, so NO RTT is being done. */ if (chk->do_rtt == 1) { chk->do_rtt = 0; chk->whoTo->rto_needed = 1; } } } } } #ifdef INET void sctp_notify(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, uint8_t icmp_type, uint8_t icmp_code, uint16_t ip_len, uint32_t next_mtu) { int timer_stopped; if (icmp_type != ICMP_UNREACH) { /* We only care about unreachable */ SCTP_TCB_UNLOCK(stcb); return; } if ((icmp_code == ICMP_UNREACH_NET) || (icmp_code == ICMP_UNREACH_HOST) || (icmp_code == ICMP_UNREACH_NET_UNKNOWN) || (icmp_code == ICMP_UNREACH_HOST_UNKNOWN) || (icmp_code == ICMP_UNREACH_ISOLATED) || (icmp_code == ICMP_UNREACH_NET_PROHIB) || (icmp_code == ICMP_UNREACH_HOST_PROHIB) || (icmp_code == ICMP_UNREACH_FILTER_PROHIB)) { /* Mark the net unreachable. */ if (net->dest_state & SCTP_ADDR_REACHABLE) { /* OK, that destination is NOT reachable. 
*/ net->dest_state &= ~SCTP_ADDR_REACHABLE; net->dest_state &= ~SCTP_ADDR_PF; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, (void *)net, SCTP_SO_NOT_LOCKED); } SCTP_TCB_UNLOCK(stcb); } else if ((icmp_code == ICMP_UNREACH_PROTOCOL) || (icmp_code == ICMP_UNREACH_PORT)) { /* Treat it like an ABORT. */ sctp_abort_notification(stcb, true, false, 0, NULL, SCTP_SO_NOT_LOCKED); (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); /* no need to unlock here, since the TCB is gone */ } else if (icmp_code == ICMP_UNREACH_NEEDFRAG) { if (net->dest_state & SCTP_ADDR_NO_PMTUD) { SCTP_TCB_UNLOCK(stcb); return; } /* Find the next (smaller) MTU */ if (next_mtu == 0) { /* * Old type router that does not tell us what the * next MTU is. Rats we will have to guess (in a * educated fashion of course). */ next_mtu = sctp_get_prev_mtu(ip_len); } /* Stop the PMTU timer. */ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { timer_stopped = 1; sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1); } else { timer_stopped = 0; } /* Update the path MTU. */ if (net->port) { next_mtu -= sizeof(struct udphdr); } if (net->mtu > next_mtu) { net->mtu = next_mtu; if (net->port) { sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu + sizeof(struct udphdr)); } else { sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu); } } /* Update the association MTU */ if (stcb->asoc.smallest_mtu > next_mtu) { sctp_pathmtu_adjustment(stcb, next_mtu, true); } /* Finally, start the PMTU timer if it was running before. */ if (timer_stopped) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } SCTP_TCB_UNLOCK(stcb); } else { SCTP_TCB_UNLOCK(stcb); } } void sctp_ctlinput(struct icmp *icmp) { struct ip *inner_ip, *outer_ip; struct sctphdr *sh; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; struct sctp_init_chunk *ch; struct sockaddr_in src, dst; if (icmp_errmap(icmp) == 0) return; outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); inner_ip = &icmp->icmp_ip; sh = (struct sctphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); memset(&src, 0, sizeof(struct sockaddr_in)); src.sin_family = AF_INET; src.sin_len = sizeof(struct sockaddr_in); src.sin_port = sh->src_port; src.sin_addr = inner_ip->ip_src; memset(&dst, 0, sizeof(struct sockaddr_in)); dst.sin_family = AF_INET; dst.sin_len = sizeof(struct sockaddr_in); dst.sin_port = sh->dest_port; dst.sin_addr = inner_ip->ip_dst; /* * 'dst' holds the dest of the packet that failed to be sent. 'src' * holds our local endpoint address. Thus we reverse the dst and the * src in the lookup. */ inp = NULL; net = NULL; stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, (struct sockaddr *)&src, &inp, &net, 1, SCTP_DEFAULT_VRFID); if ((stcb != NULL) && (net != NULL) && (inp != NULL)) { /* Check the verification tag */ if (ntohl(sh->v_tag) != 0) { /* * This must be the verification tag used for * sending out packets. We don't consider packets * reflecting the verification tag. */ if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { SCTP_TCB_UNLOCK(stcb); return; } } else { if (ntohs(outer_ip->ip_len) >= sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + 20) { /* * In this case we can check if we got an * INIT chunk and if the initiate tag * matches. 
*/ ch = (struct sctp_init_chunk *)(sh + 1); if ((ch->ch.chunk_type != SCTP_INITIATION) || (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { SCTP_TCB_UNLOCK(stcb); return; } } else { SCTP_TCB_UNLOCK(stcb); return; } } sctp_notify(inp, stcb, net, icmp->icmp_type, icmp->icmp_code, ntohs(inner_ip->ip_len), (uint32_t)ntohs(icmp->icmp_nextmtu)); } else { if ((stcb == NULL) && (inp != NULL)) { /* reduce ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } if (stcb) { SCTP_TCB_UNLOCK(stcb); } } } #endif static int sctp_getcred(SYSCTL_HANDLER_ARGS) { struct xucred xuc; struct sockaddr_in addrs[2]; struct sctp_inpcb *inp; struct sctp_nets *net; struct sctp_tcb *stcb; int error; uint32_t vrf_id; /* FIX, for non-bsd is this right? */ vrf_id = SCTP_DEFAULT_VRFID; error = priv_check(req->td, PRIV_NETINET_GETCRED); if (error) return (error); error = SYSCTL_IN(req, addrs, sizeof(addrs)); if (error) return (error); stcb = sctp_findassociation_addr_sa(sintosa(&addrs[1]), sintosa(&addrs[0]), &inp, &net, 1, vrf_id); if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { if ((inp != NULL) && (stcb == NULL)) { /* reduce ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); goto cred_can_cont; } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; goto out; } SCTP_TCB_UNLOCK(stcb); /* * We use the write lock here, only since in the error leg we need * it. If we used RLOCK, then we would have to * wlock/decr/unlock/rlock. Which in theory could create a hole. * Better to use higher wlock. */ SCTP_INP_WLOCK(inp); cred_can_cont: error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); if (error) { SCTP_INP_WUNLOCK(inp); goto out; } cru2x(inp->sctp_socket->so_cred, &xuc); SCTP_INP_WUNLOCK(inp); error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); out: return (error); } SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); void sctp_abort(struct socket *so) { struct epoch_tracker et; struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { return; } SCTP_INP_WLOCK(inp); NET_EPOCH_ENTER(et); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 17); #endif if (((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)) { inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP; #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 16); #endif SCTP_INP_WUNLOCK(inp); sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); SOCK_LOCK(so); KASSERT(!SOLISTENING(so), ("sctp_abort: called on listening socket %p", so)); SCTP_SB_CLEAR(so->so_snd); SCTP_SB_CLEAR(so->so_rcv); /* Now null out the reference, we are completely detached. 
*/ so->so_pcb = NULL; SOCK_UNLOCK(so); } else { SCTP_INP_WUNLOCK(inp); } NET_EPOCH_EXIT(et); } #ifdef INET static int sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED) { struct sctp_inpcb *inp; struct inpcb *ip_inp; int error; uint32_t vrf_id = SCTP_DEFAULT_VRFID; inp = (struct sctp_inpcb *)so->so_pcb; if (inp != NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace)); if (error) { return (error); } } error = sctp_inpcb_alloc(so, vrf_id); if (error) { return (error); } inp = (struct sctp_inpcb *)so->so_pcb; SCTP_INP_WLOCK(inp); inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */ ip_inp = &inp->ip_inp.inp; ip_inp->inp_vflag |= INP_IPV4; ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl); SCTP_INP_WUNLOCK(inp); return (0); } static int sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) { struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if (addr != NULL) { if ((addr->sa_family != AF_INET) || (addr->sa_len != sizeof(struct sockaddr_in))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } } return (sctp_inpcb_bind(so, addr, NULL, p)); } #endif void sctp_close(struct socket *so) { struct epoch_tracker et; struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) return; /* * Inform all the lower layer assoc that we are done. */ SCTP_INP_WLOCK(inp); NET_EPOCH_ENTER(et); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 17); #endif if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP; if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || (SCTP_SBAVAIL(&so->so_rcv) > 0)) { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 13); #endif SCTP_INP_WUNLOCK(inp); sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); } else { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 14); #endif SCTP_INP_WUNLOCK(inp); sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); } /* * The socket is now detached, no matter what the state of * the SCTP association. */ SOCK_LOCK(so); if (!SOLISTENING(so)) { SCTP_SB_CLEAR(so->so_snd); SCTP_SB_CLEAR(so->so_rcv); } /* Now null out the reference, we are completely detached. 
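		 * (Whether the teardown above was abortive or graceful was
		 * decided by SO_LINGER and any unread receive data.)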
*/ so->so_pcb = NULL; SOCK_UNLOCK(so); } else { SCTP_INP_WUNLOCK(inp); } NET_EPOCH_EXIT(et); } int sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p); int sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p) { struct sctp_inpcb *inp; int error; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { if (control) { sctp_m_freem(control); control = NULL; } SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); sctp_m_freem(m); return (EINVAL); } /* Got to have an to address if we are NOT a connected socket */ if ((addr == NULL) && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) { goto connected_type; } error = 0; if (addr == NULL) { SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); error = EDESTADDRREQ; } else if (addr->sa_family != AF_INET) { SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT); error = EAFNOSUPPORT; } else if (addr->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if (error != 0) { sctp_m_freem(m); if (control) { sctp_m_freem(control); control = NULL; } return (error); } connected_type: /* now what about control */ if (control) { if (inp->control) { sctp_m_freem(inp->control); inp->control = NULL; } inp->control = control; } /* Place the data */ if (inp->pkt) { SCTP_BUF_NEXT(inp->pkt_last) = m; inp->pkt_last = m; } else { inp->pkt_last = inp->pkt = m; } if ( /* FreeBSD uses a flag passed */ ((flags & PRUS_MORETOCOME) == 0) ) { /* * note with the current version this code will only be used * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for * re-defining sosend to use the sctp_sosend. One can * optionally switch back to this code (by changing back the * definitions) but this is not advisable. This code is used * by FreeBSD when sending a file with sendfile() though. 
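	 * With PRUS_MORETOCOME set, the mbufs simply remain queued on
	 * inp->pkt and are pushed out by a later call made without that
	 * flag.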
*/ struct epoch_tracker et; int ret; NET_EPOCH_ENTER(et); ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); NET_EPOCH_EXIT(et); inp->pkt = NULL; inp->control = NULL; return (ret); } else { return (0); } } int sctp_disconnect(struct socket *so) { struct epoch_tracker et; struct sctp_inpcb *inp; struct sctp_association *asoc; struct sctp_tcb *stcb; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); return (ENOTCONN); } SCTP_INP_RLOCK(inp); KASSERT(inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE || inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL, ("Not a one-to-one style socket")); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); return (ENOTCONN); } SCTP_TCB_LOCK(stcb); asoc = &stcb->asoc; if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { /* We are about to be freed, out of here */ SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } NET_EPOCH_ENTER(et); if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || (SCTP_SBAVAIL(&so->so_rcv) > 0)) { if (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) { /* Left with Data unread */ struct mbuf *op_err; op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); SCTP_STAT_INCR_COUNTER32(sctps_aborted); } SCTP_INP_RUNLOCK(inp); if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); /* No unlock tcb assoc is gone */ NET_EPOCH_EXIT(et); return (0); } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->stream_queue_cnt == 0)) { /* there is nothing queued to send, so done */ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { goto abort_anyway; } if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { /* only send SHUTDOWN 1st time thru */ struct sctp_nets *netp; if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); sctp_stop_timers_for_shutdown(stcb); if (stcb->asoc.alternate) { netp = stcb->asoc.alternate; } else { netp = stcb->asoc.primary_destination; } sctp_send_shutdown(stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL); sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); } } else { /* * we still got (or just got) data to send, so set * SHUTDOWN_PENDING */ /* * XXX sockets draft says that SCTP_EOF should be sent with * no data. 
currently, we will allow user data to be sent * first and move to SHUTDOWN-PENDING */ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; abort_anyway: op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4; sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); SCTP_STAT_INCR_COUNTER32(sctps_aborted); if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } SCTP_INP_RUNLOCK(inp); (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); NET_EPOCH_EXIT(et); return (0); } else { sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); } } soisdisconnecting(so); NET_EPOCH_EXIT(et); SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } int sctp_flush(struct socket *so, int how) { struct epoch_tracker et; struct sctp_tcb *stcb; struct sctp_queued_to_read *control, *ncontrol; struct sctp_inpcb *inp; struct mbuf *m, *op_err; bool need_to_abort = false; /* * For 1-to-1 style sockets, flush the read queue and trigger an * ungraceful shutdown of the association, if and only if user * messages are lost. Loosing notifications does not need to be * signalled to the peer. */ if (how == PRU_FLUSH_WR) { /* This function is only relevant for the read directions. */ return (0); } inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } SCTP_INP_WLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { /* For 1-to-many style sockets this function does nothing. 
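		 * Flushing the read queue and the possible abort below apply
		 * only to 1-to-1 style sockets.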
*/ SCTP_INP_WUNLOCK(inp); return (0); } stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb != NULL) { SCTP_TCB_LOCK(stcb); } SCTP_INP_READ_LOCK(inp); inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ; SOCK_LOCK(so); TAILQ_FOREACH_SAFE(control, &inp->read_queue, next, ncontrol) { if ((control->spec_flags & M_NOTIFICATION) == 0) { need_to_abort = true; } TAILQ_REMOVE(&inp->read_queue, control, next); control->on_read_q = 0; for (m = control->data; m; m = SCTP_BUF_NEXT(m)) { sctp_sbfree(control, control->stcb, &so->so_rcv, m); } if (control->on_strm_q == 0) { sctp_free_remote_addr(control->whoFrom); if (control->data) { sctp_m_freem(control->data); control->data = NULL; } sctp_free_a_readq(stcb, control); } else { stcb->asoc.size_on_all_streams += control->length; } } SOCK_UNLOCK(so); SCTP_INP_READ_UNLOCK(inp); if (need_to_abort && (stcb != NULL)) { inp->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; SCTP_INP_WUNLOCK(inp); op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); NET_EPOCH_ENTER(et); sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_LOCKED); NET_EPOCH_EXIT(et); return (ECONNABORTED); } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } SCTP_INP_WUNLOCK(inp); return (0); } int sctp_shutdown(struct socket *so) { struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } SCTP_INP_RLOCK(inp); /* For UDP model this is a invalid call */ if (!((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { /* Restore the flags that the soshutdown took away. */ SOCKBUF_LOCK(&so->so_rcv); so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; SOCKBUF_UNLOCK(&so->so_rcv); /* This proc will wakeup for read and do nothing (I hope) */ SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); return (EOPNOTSUPP); } else { /* * Ok, if we reach here its the TCP model and it is either a * SHUT_WR or SHUT_RDWR. This means we put the shutdown flag * against it. */ struct epoch_tracker et; struct sctp_tcb *stcb; struct sctp_association *asoc; struct sctp_nets *netp; if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { SCTP_INP_RUNLOCK(inp); return (ENOTCONN); } socantsendmore(so); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { /* * Ok, we hit the case that the shutdown call was * made after an abort or something. Nothing to do * now. */ SCTP_INP_RUNLOCK(inp); return (0); } SCTP_TCB_LOCK(stcb); asoc = &stcb->asoc; if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } if ((SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) && (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_ECHOED) && (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN)) { /* * If we are not in or before ESTABLISHED, there is * no protocol action required. */ SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } NET_EPOCH_ENTER(et); if (stcb->asoc.alternate) { netp = stcb->asoc.alternate; } else { netp = stcb->asoc.primary_destination; } if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) && TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->stream_queue_cnt == 0)) { if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { goto abort_anyway; } /* there is nothing queued to send, so I'm done... 
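			 * Move straight to SHUTDOWN-SENT and send a SHUTDOWN
			 * chunk to the destination (alternate or primary)
			 * chosen above.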
*/ SCTP_STAT_DECR_GAUGE32(sctps_currestab); SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); sctp_stop_timers_for_shutdown(stcb); sctp_send_shutdown(stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL); } else { /* * We still got (or just got) data to send, so set * SHUTDOWN_PENDING. */ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; abort_anyway: op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; SCTP_INP_RUNLOCK(inp); sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_LOCKED); NET_EPOCH_EXIT(et); return (0); } } /* * XXX: Why do this in the case where we have still data * queued? */ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); NET_EPOCH_EXIT(et); return (0); } } /* * copies a "user" presentable address and removes embedded scope, etc. * returns 0 on success, 1 on error */ static uint32_t sctp_fill_user_address(struct sockaddr *dst, struct sockaddr *src) { #ifdef INET6 struct sockaddr_in6 lsa6; src = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)src, &lsa6); #endif memcpy(dst, src, src->sa_len); return (0); } static size_t sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, struct sctp_tcb *stcb, size_t limit, struct sockaddr *addr, uint32_t vrf_id) { struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa; size_t actual; int loopback_scope; #if defined(INET) int ipv4_local_scope, ipv4_addr_legal; #endif #if defined(INET6) int local_scope, site_scope, ipv6_addr_legal; #endif struct sctp_vrf *vrf; SCTP_IPI_ADDR_LOCK_ASSERT(); actual = 0; if (limit == 0) return (actual); if (stcb) { /* Turn on all the appropriate scope */ loopback_scope = stcb->asoc.scope.loopback_scope; #if defined(INET) ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; #endif #if defined(INET6) local_scope = stcb->asoc.scope.local_scope; site_scope = stcb->asoc.scope.site_scope; ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; #endif } else { /* Use generic values for endpoints. */ loopback_scope = 1; #if defined(INET) ipv4_local_scope = 1; #endif #if defined(INET6) local_scope = 1; site_scope = 1; #endif if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { #if defined(INET6) ipv6_addr_legal = 1; #endif #if defined(INET) if (SCTP_IPV6_V6ONLY(inp)) { ipv4_addr_legal = 0; } else { ipv4_addr_legal = 1; } #endif } else { #if defined(INET6) ipv6_addr_legal = 0; #endif #if defined(INET) ipv4_addr_legal = 1; #endif } } vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { return (0); } if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { if ((loopback_scope == 0) && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { /* Skip loopback if loopback_scope not set */ continue; } LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { if (stcb) { /* * For the BOUND-ALL case, the list * associated with a TCB is Always * considered a reverse list.. i.e. * it lists addresses that are NOT * part of the association. If this * is one of those we must skip it. 
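					 * (That is what the
					 * sctp_is_addr_restricted() check
					 * right below does.)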
*/ if (sctp_is_addr_restricted(stcb, sctp_ifa)) { continue; } } switch (sctp_ifa->address.sa.sa_family) { #ifdef INET case AF_INET: if (ipv4_addr_legal) { struct sockaddr_in *sin; sin = &sctp_ifa->address.sin; if (sin->sin_addr.s_addr == 0) { /* * we skip * unspecified * addresses */ continue; } if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; } if ((ipv4_local_scope == 0) && (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { continue; } #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { if (actual + sizeof(struct sockaddr_in6) > limit) { return (actual); } in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)addr); ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport; addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in6)); actual += sizeof(struct sockaddr_in6); } else { #endif if (actual + sizeof(struct sockaddr_in) > limit) { return (actual); } memcpy(addr, sin, sizeof(struct sockaddr_in)); ((struct sockaddr_in *)addr)->sin_port = inp->sctp_lport; addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in)); actual += sizeof(struct sockaddr_in); #ifdef INET6 } #endif } else { continue; } break; #endif #ifdef INET6 case AF_INET6: if (ipv6_addr_legal) { struct sockaddr_in6 *sin6; sin6 = &sctp_ifa->address.sin6; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* * we skip * unspecified * addresses */ continue; } if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; } if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { if (local_scope == 0) continue; if (sin6->sin6_scope_id == 0) { if (sa6_recoverscope(sin6) != 0) /* * * bad * link * * local * * address */ continue; } } if ((site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { continue; } if (actual + sizeof(struct sockaddr_in6) > limit) { return (actual); } memcpy(addr, sin6, sizeof(struct sockaddr_in6)); ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport; addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in6)); actual += sizeof(struct sockaddr_in6); } else { continue; } break; #endif default: /* TSNH */ break; } } } } else { struct sctp_laddr *laddr; size_t sa_len; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (stcb) { if (sctp_is_addr_restricted(stcb, laddr->ifa)) { continue; } } sa_len = laddr->ifa->address.sa.sa_len; if (actual + sa_len > limit) { return (actual); } if (sctp_fill_user_address(addr, &laddr->ifa->address.sa)) continue; switch (laddr->ifa->address.sa.sa_family) { #ifdef INET case AF_INET: ((struct sockaddr_in *)addr)->sin_port = inp->sctp_lport; break; #endif #ifdef INET6 case AF_INET6: ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport; break; #endif default: /* TSNH */ break; } addr = (struct sockaddr *)((caddr_t)addr + sa_len); actual += sa_len; } } return (actual); } static size_t sctp_fill_up_addresses(struct sctp_inpcb *inp, struct sctp_tcb *stcb, size_t limit, struct sockaddr *addr) { size_t size; SCTP_IPI_ADDR_RLOCK(); /* fill up addresses for the endpoint's default vrf */ size = sctp_fill_up_addresses_vrf(inp, stcb, limit, addr, inp->def_vrf_id); SCTP_IPI_ADDR_RUNLOCK(); return (size); } static size_t sctp_max_size_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) { struct sctp_vrf *vrf; size_t size; /* * In both sub-set bound an bound_all cases we return the size of * the maximum number of addresses that you could get. 
In reality * the sub-set bound may have an exclusion list for a given TCB or * in the bound-all case a TCB may NOT include the loopback or other * addresses as well. */ SCTP_IPI_ADDR_LOCK_ASSERT(); vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { return (0); } size = 0; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa; LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { /* Count them if they are the right type */ switch (sctp_ifa->address.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) size += sizeof(struct sockaddr_in6); else size += sizeof(struct sockaddr_in); #else size += sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6: size += sizeof(struct sockaddr_in6); break; #endif default: break; } } } } else { struct sctp_laddr *laddr; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { switch (laddr->ifa->address.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) size += sizeof(struct sockaddr_in6); else size += sizeof(struct sockaddr_in); #else size += sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6: size += sizeof(struct sockaddr_in6); break; #endif default: break; } } } return (size); } static size_t sctp_max_size_addresses(struct sctp_inpcb *inp) { size_t size; SCTP_IPI_ADDR_RLOCK(); /* Maximum size of all addresses for the endpoint's default VRF */ size = sctp_max_size_addresses_vrf(inp, inp->def_vrf_id); SCTP_IPI_ADDR_RUNLOCK(); return (size); } static int sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, size_t optsize, void *p, int delay) { int error; int creat_lock_on = 0; struct sctp_tcb *stcb = NULL; struct sockaddr *sa; unsigned int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr; uint32_t vrf_id; sctp_assoc_t *a_id; SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n"); if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); return (EADDRINUSE); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); SCTP_INP_RUNLOCK(inp); } if (stcb) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); return (EALREADY); } SCTP_INP_INCR_REF(inp); SCTP_ASOC_CREATE_LOCK(inp); creat_lock_on = 1; if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); error = EFAULT; goto out_now; } totaddrp = (unsigned int *)optval; totaddr = *totaddrp; sa = (struct sockaddr *)(totaddrp + 1); error = sctp_connectx_helper_find(inp, sa, totaddr, &num_v4, &num_v6, (unsigned int)(optsize - sizeof(int))); if (error != 0) { /* Already have or am bring up an association */ SCTP_ASOC_CREATE_UNLOCK(inp); creat_lock_on = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); goto out_now; } #ifdef INET6 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && (num_v6 > 0)) { error = EINVAL; goto out_now; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) 
&& (num_v4 > 0)) { if (SCTP_IPV6_V6ONLY(inp)) { /* * if IPV6_V6ONLY flag, ignore connections destined * to a v4 addr or v4-mapped addr */ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_now; } } #endif /* INET6 */ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { /* Bind a ephemeral port */ error = sctp_inpcb_bind(so, NULL, NULL, p); if (error) { goto out_now; } } /* FIX ME: do we want to pass in a vrf on the connect call? */ vrf_id = inp->def_vrf_id; /* We are GOOD to go */ stcb = sctp_aloc_assoc_connected(inp, sa, &error, 0, 0, vrf_id, inp->sctp_ep.pre_open_stream_count, inp->sctp_ep.port, (struct thread *)p, SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Gak! no memory */ goto out_now; } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); /* move to second address */ switch (sa->sa_family) { #ifdef INET case AF_INET: sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); break; #endif #ifdef INET6 case AF_INET6: sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); break; #endif default: break; } error = 0; sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error); /* Fill in the return id */ if (error) { goto out_now; } a_id = (sctp_assoc_t *)optval; *a_id = sctp_get_associd(stcb); if (delay) { /* doing delayed connection */ stcb->asoc.delayed_connection = 1; sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); } else { (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); } SCTP_TCB_UNLOCK(stcb); out_now: if (creat_lock_on) { SCTP_ASOC_CREATE_UNLOCK(inp); } SCTP_INP_DECR_REF(inp); return (error); } #define SCTP_FIND_STCB(inp, stcb, assoc_id) { \ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \ SCTP_INP_RLOCK(inp); \ stcb = LIST_FIRST(&inp->sctp_asoc_list); \ if (stcb) { \ SCTP_TCB_LOCK(stcb); \ } \ SCTP_INP_RUNLOCK(inp); \ } else if (assoc_id > SCTP_ALL_ASSOC) { \ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \ if (stcb == NULL) { \ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \ error = ENOENT; \ break; \ } \ } else { \ stcb = NULL; \ } \ } #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\ if (size < sizeof(type)) { \ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \ error = EINVAL; \ break; \ } else { \ destp = (type *)srcp; \ } \ } static int sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, void *p) { struct sctp_inpcb *inp = NULL; int error, val = 0; struct sctp_tcb *stcb = NULL; if (optval == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return EINVAL; } error = 0; switch (optname) { case SCTP_NODELAY: case SCTP_AUTOCLOSE: case SCTP_EXPLICIT_EOR: case SCTP_AUTO_ASCONF: case SCTP_DISABLE_FRAGMENTS: case SCTP_I_WANT_MAPPED_V4_ADDR: case SCTP_USE_EXT_RCVINFO: SCTP_INP_RLOCK(inp); switch (optname) { case SCTP_DISABLE_FRAGMENTS: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); break; case SCTP_I_WANT_MAPPED_V4_ADDR: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); break; case SCTP_AUTO_ASCONF: if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* only valid for bound all sockets */ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 
SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto flags_out; } break; case SCTP_EXPLICIT_EOR: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); break; case SCTP_NODELAY: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); break; case SCTP_USE_EXT_RCVINFO: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); break; case SCTP_AUTOCLOSE: if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) val = sctp_ticks_to_secs(inp->sctp_ep.auto_close_time); else val = 0; break; default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; } /* end switch (sopt->sopt_name) */ if (*optsize < sizeof(val)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } flags_out: SCTP_INP_RUNLOCK(inp); if (error == 0) { /* return the option value */ *(int *)optval = val; *optsize = sizeof(val); } break; case SCTP_GET_PACKET_LOG: { #ifdef SCTP_PACKET_LOGGING uint8_t *target; int ret; SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize); ret = sctp_copy_out_packet_log(target, (int)*optsize); *optsize = ret; #else SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; #endif break; } case SCTP_REUSE_PORT: { uint32_t *value; if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { /* Can't do this for a 1-m socket */ error = EINVAL; break; } SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); *optsize = sizeof(uint32_t); break; } case SCTP_PARTIAL_DELIVERY_POINT: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); *value = inp->partial_delivery_point; *optsize = sizeof(uint32_t); break; } case SCTP_FRAGMENT_INTERLEAVE: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) { if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) { *value = SCTP_FRAG_LEVEL_2; } else { *value = SCTP_FRAG_LEVEL_1; } } else { *value = SCTP_FRAG_LEVEL_0; } *optsize = sizeof(uint32_t); break; } case SCTP_INTERLEAVING_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.idata_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); if (inp->idata_supported) { av->assoc_value = 1; } else { av->assoc_value = 0; } SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_CMT_ON_OFF: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.sctp_cmt_on_off; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_cmt_on_off; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_PLUGGABLE_CC: { struct 
sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.congestion_control_module; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_ep.sctp_default_cc_module; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_CC_OPTION: { struct sctp_cc_option *cc_opt; SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, *optsize); SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id); if (stcb == NULL) { error = EINVAL; } else { if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) { error = ENOTSUP; } else { error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 0, cc_opt); *optsize = sizeof(struct sctp_cc_option); } SCTP_TCB_UNLOCK(stcb); } break; } case SCTP_STREAM_SCHEDULER: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.stream_scheduling_module; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_ep.sctp_default_ss_module; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_STREAM_SCHEDULER_VALUE: { struct sctp_stream_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { if ((av->stream_id >= stcb->asoc.streamoutcnt) || (stcb->asoc.ss_functions.sctp_ss_get_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], &av->stream_value) < 0)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { *optsize = sizeof(struct sctp_stream_value); } SCTP_TCB_UNLOCK(stcb); } else { /* * Can't get stream value without * association */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_GET_ADDR_LEN: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); error = EINVAL; #ifdef INET if (av->assoc_value == AF_INET) { av->assoc_value = sizeof(struct sockaddr_in); error = 0; } #endif #ifdef INET6 if (av->assoc_value == AF_INET6) { av->assoc_value = sizeof(struct sockaddr_in6); error = 0; } #endif if (error) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_GET_ASSOC_NUMBER: { uint32_t *value, cnt; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); SCTP_INP_RLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* Can't do this for a 1-1 socket */ error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } cnt = 0; LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { cnt++; } SCTP_INP_RUNLOCK(inp); *value = cnt; *optsize = sizeof(uint32_t); break; } case 
SCTP_GET_ASSOC_ID_LIST: { struct sctp_assoc_ids *ids; uint32_t at; size_t limit; SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); SCTP_INP_RLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* Can't do this for a 1-1 socket */ error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } at = 0; limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { if (at < limit) { ids->gaids_assoc_id[at++] = sctp_get_associd(stcb); if (at == 0) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } SCTP_INP_RUNLOCK(inp); if (error == 0) { ids->gaids_number_of_ids = at; *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t)); } break; } case SCTP_CONTEXT: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.context; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_context; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_VRF_ID: { uint32_t *default_vrfid; SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize); *default_vrfid = inp->def_vrf_id; *optsize = sizeof(uint32_t); break; } case SCTP_GET_ASOC_VRF: { struct sctp_assoc_value *id; SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, id->assoc_id); if (stcb == NULL) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { id->assoc_value = stcb->asoc.vrf_id; SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_GET_VRF_IDS: { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; break; } case SCTP_GET_NONCE_VALUES: { struct sctp_get_nonce_values *gnv; SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); if (stcb) { gnv->gn_peers_tag = stcb->asoc.peer_vtag; gnv->gn_local_tag = stcb->asoc.my_vtag; SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_get_nonce_values); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); error = ENOTCONN; } break; } case SCTP_DELAYED_SACK: { struct sctp_sack_info *sack; SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize); SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); if (stcb) { sack->sack_delay = stcb->asoc.delayed_ack; sack->sack_freq = stcb->asoc.sack_freq; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (sack->sack_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); sack->sack_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); sack->sack_freq = inp->sctp_ep.sctp_sack_freq; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = 
sizeof(struct sctp_sack_info); } break; } case SCTP_GET_SNDBUF_USE: { struct sctp_sockstat *ss; SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); if (stcb) { ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + stcb->asoc.size_on_all_streams); SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_sockstat); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); error = ENOTCONN; } break; } case SCTP_MAX_BURST: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.max_burst; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_ep.max_burst; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_MAXSEG: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.sctp_frag_point; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_frag_point; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_GET_STAT_LOG: error = sctp_fill_stat_log(optval, optsize); break; case SCTP_EVENTS: { struct sctp_event_subscribe *events; SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); memset(events, 0, sizeof(struct sctp_event_subscribe)); SCTP_INP_RLOCK(inp); if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) events->sctp_data_io_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) events->sctp_association_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) events->sctp_address_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) events->sctp_send_failure_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) events->sctp_peer_error_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) events->sctp_shutdown_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) events->sctp_partial_delivery_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) events->sctp_adaptation_layer_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) events->sctp_authentication_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT)) events->sctp_sender_dry_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) events->sctp_stream_reset_event = 1; SCTP_INP_RUNLOCK(inp); *optsize = sizeof(struct sctp_event_subscribe); break; } case SCTP_ADAPTATION_LAYER: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); SCTP_INP_RLOCK(inp); *value = inp->sctp_ep.adaptation_layer_indicator; SCTP_INP_RUNLOCK(inp); *optsize = sizeof(uint32_t); 
break; } case SCTP_SET_INITIAL_DBG_SEQ: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); SCTP_INP_RLOCK(inp); *value = inp->sctp_ep.initial_sequence_debug; SCTP_INP_RUNLOCK(inp); *optsize = sizeof(uint32_t); break; } case SCTP_GET_LOCAL_ADDR_SIZE: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); SCTP_INP_RLOCK(inp); *value = (uint32_t)sctp_max_size_addresses(inp); SCTP_INP_RUNLOCK(inp); *optsize = sizeof(uint32_t); break; } case SCTP_GET_REMOTE_ADDR_SIZE: { uint32_t *value; struct sctp_nets *net; size_t size; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); /* FIXME MT: change to sctp_assoc_value? */ SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t)*value); if (stcb != NULL) { size = 0; /* Count the sizes */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { size += sizeof(struct sockaddr_in6); } else { size += sizeof(struct sockaddr_in); } #else size += sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6: size += sizeof(struct sockaddr_in6); break; #endif default: break; } } SCTP_TCB_UNLOCK(stcb); *value = (uint32_t)size; *optsize = sizeof(uint32_t); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((sctp_assoc_t)*value <= SCTP_ALL_ASSOC)) { error = EINVAL; } else { error = ENOENT; } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } break; } case SCTP_GET_PEER_ADDRESSES: /* * Get the address information, an array is passed in to * fill up we pack it. */ { size_t cpsz, left; struct sockaddr *addr; struct sctp_nets *net; struct sctp_getaddresses *saddr; SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); if (stcb != NULL) { left = *optsize - offsetof(struct sctp_getaddresses, addr); *optsize = offsetof(struct sctp_getaddresses, addr); addr = &saddr->addr[0].sa; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { cpsz = sizeof(struct sockaddr_in6); } else { cpsz = sizeof(struct sockaddr_in); } #else cpsz = sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6: cpsz = sizeof(struct sockaddr_in6); break; #endif default: cpsz = 0; break; } if (cpsz == 0) { break; } if (left < cpsz) { /* not enough room. 
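					 * Stop copying; *optsize already
					 * accounts for the addresses that
					 * did fit.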
*/ break; } #if defined(INET) && defined(INET6) if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && (net->ro._l_addr.sa.sa_family == AF_INET)) { /* Must map the address */ in6_sin_2_v4mapsin6(&net->ro._l_addr.sin, (struct sockaddr_in6 *)addr); } else { memcpy(addr, &net->ro._l_addr, cpsz); } #else memcpy(addr, &net->ro._l_addr, cpsz); #endif ((struct sockaddr_in *)addr)->sin_port = stcb->rport; addr = (struct sockaddr *)((caddr_t)addr + cpsz); left -= cpsz; *optsize += cpsz; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (saddr->sget_assoc_id <= SCTP_ALL_ASSOC)) { error = EINVAL; } else { error = ENOENT; } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } break; } case SCTP_GET_LOCAL_ADDRESSES: { size_t limit, actual; struct sctp_getaddresses *saddr; SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((saddr->sget_assoc_id == SCTP_CURRENT_ASSOC) || (saddr->sget_assoc_id == SCTP_ALL_ASSOC))) { error = EINVAL; } else { limit = *optsize - offsetof(struct sctp_getaddresses, addr); actual = sctp_fill_up_addresses(inp, stcb, limit, &saddr->addr[0].sa); *optsize = offsetof(struct sctp_getaddresses, addr) + actual; } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } break; } case SCTP_PEER_ADDR_PARAMS: { struct sctp_paddrparams *paddrp; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); #if defined(INET) && defined(INET6) if (paddrp->spp_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&paddrp->spp_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&paddrp->spp_address; } } else { addr = (struct sockaddr *)&paddrp->spp_address; } #else addr = (struct sockaddr *)&paddrp->spp_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. 
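				 * If no association is found, the reference
				 * is dropped again immediately below.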
*/ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (stcb != NULL) { /* Applies to the specific association */ paddrp->spp_flags = 0; if (net != NULL) { paddrp->spp_hbinterval = net->heart_beat_delay; paddrp->spp_pathmaxrxt = net->failure_threshold; paddrp->spp_pathmtu = net->mtu; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: paddrp->spp_pathmtu -= SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: paddrp->spp_pathmtu -= SCTP_MIN_OVERHEAD; break; #endif default: break; } /* get flags for HB */ if (net->dest_state & SCTP_ADDR_NOHB) { paddrp->spp_flags |= SPP_HB_DISABLE; } else { paddrp->spp_flags |= SPP_HB_ENABLE; } /* get flags for PMTU */ if (net->dest_state & SCTP_ADDR_NO_PMTUD) { paddrp->spp_flags |= SPP_PMTUD_DISABLE; } else { paddrp->spp_flags |= SPP_PMTUD_ENABLE; } if (net->dscp & 0x01) { paddrp->spp_dscp = net->dscp & 0xfc; paddrp->spp_flags |= SPP_DSCP; } #ifdef INET6 if ((net->ro._l_addr.sa.sa_family == AF_INET6) && (net->flowlabel & 0x80000000)) { paddrp->spp_ipv6_flowlabel = net->flowlabel & 0x000fffff; paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; } #endif } else { /* * No destination so return default * value */ paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; paddrp->spp_pathmtu = stcb->asoc.default_mtu; if (stcb->asoc.default_dscp & 0x01) { paddrp->spp_dscp = stcb->asoc.default_dscp & 0xfc; paddrp->spp_flags |= SPP_DSCP; } #ifdef INET6 if (stcb->asoc.default_flowlabel & 0x80000000) { paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel & 0x000fffff; paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; } #endif /* default settings should be these */ if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { paddrp->spp_flags |= SPP_HB_DISABLE; } else { paddrp->spp_flags |= SPP_HB_ENABLE; } if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) { paddrp->spp_flags |= SPP_PMTUD_DISABLE; } else { paddrp->spp_flags |= SPP_PMTUD_ENABLE; } paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; } paddrp->spp_assoc_id = sctp_get_associd(stcb); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (paddrp->spp_assoc_id == SCTP_FUTURE_ASSOC))) { /* Use endpoint defaults */ SCTP_INP_RLOCK(inp); paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; paddrp->spp_hbinterval = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); paddrp->spp_assoc_id = SCTP_FUTURE_ASSOC; /* get inp's default */ if (inp->sctp_ep.default_dscp & 0x01) { paddrp->spp_dscp = inp->sctp_ep.default_dscp & 0xfc; paddrp->spp_flags |= SPP_DSCP; } #ifdef INET6 if ((inp->sctp_flags & 
SCTP_PCB_FLAGS_BOUND_V6) && (inp->sctp_ep.default_flowlabel & 0x80000000)) { paddrp->spp_ipv6_flowlabel = inp->sctp_ep.default_flowlabel & 0x000fffff; paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; } #endif paddrp->spp_pathmtu = inp->sctp_ep.default_mtu; if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { paddrp->spp_flags |= SPP_HB_ENABLE; } else { paddrp->spp_flags |= SPP_HB_DISABLE; } if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) { paddrp->spp_flags |= SPP_PMTUD_ENABLE; } else { paddrp->spp_flags |= SPP_PMTUD_DISABLE; } SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_paddrparams); } break; } case SCTP_GET_PEER_ADDR_INFO: { struct sctp_paddrinfo *paddri; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); #if defined(INET) && defined(INET6) if (paddri->spinfo_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&paddri->spinfo_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&paddri->spinfo_address; } } else { addr = (struct sockaddr *)&paddri->spinfo_address; } #else addr = (struct sockaddr *)&paddri->spinfo_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. */ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net != NULL)) { if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { /* It's unconfirmed */ paddri->spinfo_state = SCTP_UNCONFIRMED; } else if (net->dest_state & SCTP_ADDR_REACHABLE) { /* It's active */ paddri->spinfo_state = SCTP_ACTIVE; } else { /* It's inactive */ paddri->spinfo_state = SCTP_INACTIVE; } paddri->spinfo_cwnd = net->cwnd; paddri->spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT; paddri->spinfo_rto = net->RTO; paddri->spinfo_assoc_id = sctp_get_associd(stcb); paddri->spinfo_mtu = net->mtu; switch (addr->sa_family) { #if defined(INET) case AF_INET: paddri->spinfo_mtu -= SCTP_MIN_V4_OVERHEAD; break; #endif #if defined(INET6) case AF_INET6: paddri->spinfo_mtu -= SCTP_MIN_OVERHEAD; break; #endif default: break; } SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_paddrinfo); } else { if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; } break; } case SCTP_PCB_STATUS: { struct sctp_pcbinfo *spcb; SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); sctp_fill_pcbinfo(spcb); *optsize = sizeof(struct sctp_pcbinfo); break; } case SCTP_STATUS: { struct sctp_nets *net; struct sctp_status *sstat; SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } sstat->sstat_state = sctp_map_assoc_state(stcb->asoc.state); sstat->sstat_assoc_id = sctp_get_associd(stcb); sstat->sstat_rwnd = stcb->asoc.peers_rwnd; sstat->sstat_unackdata = 
stcb->asoc.sent_queue_cnt; /* * We can't include chunks that have been passed to * the socket layer. Only things in queue. */ sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + stcb->asoc.cnt_on_all_streams); sstat->sstat_instrms = stcb->asoc.streamincnt; sstat->sstat_outstrms = stcb->asoc.streamoutcnt; sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb); net = stcb->asoc.primary_destination; if (net != NULL) { memcpy(&sstat->sstat_primary.spinfo_address, &net->ro._l_addr, ((struct sockaddr *)(&net->ro._l_addr))->sa_len); ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; /* * Again the user can get info from * sctp_constants.h for what the state of * the network is. */ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { /* It's unconfirmed */ sstat->sstat_primary.spinfo_state = SCTP_UNCONFIRMED; } else if (net->dest_state & SCTP_ADDR_REACHABLE) { /* It's active */ sstat->sstat_primary.spinfo_state = SCTP_ACTIVE; } else { /* It's inactive */ sstat->sstat_primary.spinfo_state = SCTP_INACTIVE; } sstat->sstat_primary.spinfo_cwnd = net->cwnd; sstat->sstat_primary.spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT; sstat->sstat_primary.spinfo_rto = net->RTO; sstat->sstat_primary.spinfo_mtu = net->mtu; switch (stcb->asoc.primary_destination->ro._l_addr.sa.sa_family) { #if defined(INET) case AF_INET: sstat->sstat_primary.spinfo_mtu -= SCTP_MIN_V4_OVERHEAD; break; #endif #if defined(INET6) case AF_INET6: sstat->sstat_primary.spinfo_mtu -= SCTP_MIN_OVERHEAD; break; #endif default: break; } } else { memset(&sstat->sstat_primary, 0, sizeof(struct sctp_paddrinfo)); } sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_status); break; } case SCTP_RTOINFO: { struct sctp_rtoinfo *srto; SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); if (stcb) { srto->srto_initial = stcb->asoc.initial_rto; srto->srto_max = stcb->asoc.maxrto; srto->srto_min = stcb->asoc.minrto; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (srto->srto_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); srto->srto_initial = inp->sctp_ep.initial_rto; srto->srto_max = inp->sctp_ep.sctp_maxrto; srto->srto_min = inp->sctp_ep.sctp_minrto; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_rtoinfo); } break; } case SCTP_TIMEOUTS: { struct sctp_timeouts *stimo; SCTP_CHECK_AND_CAST(stimo, optval, struct sctp_timeouts, *optsize); SCTP_FIND_STCB(inp, stcb, stimo->stimo_assoc_id); if (stcb) { stimo->stimo_init = stcb->asoc.timoinit; stimo->stimo_data = stcb->asoc.timodata; stimo->stimo_sack = stcb->asoc.timosack; stimo->stimo_shutdown = stcb->asoc.timoshutdown; stimo->stimo_heartbeat = stcb->asoc.timoheartbeat; stimo->stimo_cookie = stcb->asoc.timocookie; stimo->stimo_shutdownack = stcb->asoc.timoshutdownack; SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_timeouts); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_ASSOCINFO: { struct sctp_assocparams *sasoc; SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); if (stcb) { sasoc->sasoc_cookie_life = sctp_ticks_to_msecs(stcb->asoc.cookie_life); 
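				/* The cookie lifetime above is kept in ticks internally and is reported to the user in milliseconds. */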
sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); sasoc->sasoc_cookie_life = sctp_ticks_to_msecs(inp->sctp_ep.def_cookie_life); sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; sasoc->sasoc_number_peer_destinations = 0; sasoc->sasoc_peer_rwnd = 0; sasoc->sasoc_local_rwnd = (uint32_t)sbspace(&inp->sctp_socket->so_rcv); SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assocparams); } break; } case SCTP_DEFAULT_SEND_PARAM: { struct sctp_sndrcvinfo *s_info; SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); if (stcb) { memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (s_info->sinfo_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_sndrcvinfo); } break; } case SCTP_INITMSG: { struct sctp_initmsg *sinit; SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); SCTP_INP_RLOCK(inp); sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; SCTP_INP_RUNLOCK(inp); *optsize = sizeof(struct sctp_initmsg); break; } case SCTP_PRIMARY_ADDR: /* we allow a "get" operation on this */ { struct sctp_setprim *ssp; SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); if (stcb) { union sctp_sockstore *addr; addr = &stcb->asoc.primary_destination->ro._l_addr; switch (addr->sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { in6_sin_2_v4mapsin6(&addr->sin, (struct sockaddr_in6 *)&ssp->ssp_addr); } else { memcpy(&ssp->ssp_addr, &addr->sin, sizeof(struct sockaddr_in)); } #else memcpy(&ssp->ssp_addr, &addr->sin, sizeof(struct sockaddr_in)); #endif break; #endif #ifdef INET6 case AF_INET6: memcpy(&ssp->ssp_addr, &addr->sin6, sizeof(struct sockaddr_in6)); break; #endif default: break; } SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_setprim); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_HMAC_IDENT: { struct sctp_hmacalgo *shmac; sctp_hmaclist_t *hmaclist; size_t size; int i; SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); SCTP_INP_RLOCK(inp); hmaclist = inp->sctp_ep.local_hmacs; if (hmaclist == NULL) { /* no HMACs to return */ *optsize = sizeof(*shmac); SCTP_INP_RUNLOCK(inp); break; } /* is there room for all of the hmac ids? 
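			 * The buffer must have room for the header plus one
			 * identifier for each locally configured HMAC
			 * algorithm.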
*/ size = sizeof(*shmac) + (hmaclist->num_algo * sizeof(shmac->shmac_idents[0])); if (*optsize < size) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } /* copy in the list */ shmac->shmac_number_of_idents = hmaclist->num_algo; for (i = 0; i < hmaclist->num_algo; i++) { shmac->shmac_idents[i] = hmaclist->hmac[i]; } SCTP_INP_RUNLOCK(inp); *optsize = size; break; } case SCTP_AUTH_ACTIVE_KEY: { struct sctp_authkeyid *scact; SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); if (stcb) { /* get the active key on the assoc */ scact->scact_keynumber = stcb->asoc.authinfo.active_keyid; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (scact->scact_assoc_id == SCTP_FUTURE_ASSOC))) { /* get the endpoint active key */ SCTP_INP_RLOCK(inp); scact->scact_keynumber = inp->sctp_ep.default_keyid; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_authkeyid); } break; } case SCTP_LOCAL_AUTH_CHUNKS: { struct sctp_authchunks *sac; sctp_auth_chklist_t *chklist = NULL; size_t size = 0; SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); if (stcb) { /* get off the assoc */ chklist = stcb->asoc.local_auth_chunks; /* is there enough space? */ size = sctp_auth_get_chklist_size(chklist); if (*optsize < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { /* copy in the chunks */ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); sac->gauth_number_of_chunks = (uint32_t)size; *optsize = sizeof(struct sctp_authchunks) + size; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (sac->gauth_assoc_id == SCTP_FUTURE_ASSOC))) { /* get off the endpoint */ SCTP_INP_RLOCK(inp); chklist = inp->sctp_ep.local_auth_chunks; /* is there enough space? */ size = sctp_auth_get_chklist_size(chklist); if (*optsize < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { /* copy in the chunks */ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); sac->gauth_number_of_chunks = (uint32_t)size; *optsize = sizeof(struct sctp_authchunks) + size; } SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_PEER_AUTH_CHUNKS: { struct sctp_authchunks *sac; sctp_auth_chklist_t *chklist = NULL; size_t size = 0; SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); if (stcb) { /* get off the assoc */ chklist = stcb->asoc.peer_auth_chunks; /* is there enough space? 
*/ size = sctp_auth_get_chklist_size(chklist); if (*optsize < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { /* copy in the chunks */ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); sac->gauth_number_of_chunks = (uint32_t)size; *optsize = sizeof(struct sctp_authchunks) + size; } SCTP_TCB_UNLOCK(stcb); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; } break; } case SCTP_EVENT: { struct sctp_event *event; uint32_t event_type; SCTP_CHECK_AND_CAST(event, optval, struct sctp_event, *optsize); SCTP_FIND_STCB(inp, stcb, event->se_assoc_id); switch (event->se_type) { case SCTP_ASSOC_CHANGE: event_type = SCTP_PCB_FLAGS_RECVASSOCEVNT; break; case SCTP_PEER_ADDR_CHANGE: event_type = SCTP_PCB_FLAGS_RECVPADDREVNT; break; case SCTP_REMOTE_ERROR: event_type = SCTP_PCB_FLAGS_RECVPEERERR; break; case SCTP_SEND_FAILED: event_type = SCTP_PCB_FLAGS_RECVSENDFAILEVNT; break; case SCTP_SHUTDOWN_EVENT: event_type = SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT; break; case SCTP_ADAPTATION_INDICATION: event_type = SCTP_PCB_FLAGS_ADAPTATIONEVNT; break; case SCTP_PARTIAL_DELIVERY_EVENT: event_type = SCTP_PCB_FLAGS_PDAPIEVNT; break; case SCTP_AUTHENTICATION_EVENT: event_type = SCTP_PCB_FLAGS_AUTHEVNT; break; case SCTP_STREAM_RESET_EVENT: event_type = SCTP_PCB_FLAGS_STREAM_RESETEVNT; break; case SCTP_SENDER_DRY_EVENT: event_type = SCTP_PCB_FLAGS_DRYEVNT; break; case SCTP_NOTIFICATIONS_STOPPED_EVENT: event_type = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP); error = ENOTSUP; break; case SCTP_ASSOC_RESET_EVENT: event_type = SCTP_PCB_FLAGS_ASSOC_RESETEVNT; break; case SCTP_STREAM_CHANGE_EVENT: event_type = SCTP_PCB_FLAGS_STREAM_CHANGEEVNT; break; case SCTP_SEND_FAILED_EVENT: event_type = SCTP_PCB_FLAGS_RECVNSENDFAILEVNT; break; default: event_type = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (event_type > 0) { if (stcb) { event->se_on = sctp_stcb_is_feature_on(inp, stcb, event_type); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (event->se_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); event->se_on = sctp_is_feature_on(inp, event_type); SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } if (error == 0) { *optsize = sizeof(struct sctp_event); } break; } case SCTP_RECVRCVINFO: if (*optsize < sizeof(int)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_RLOCK(inp); *(int *)optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO); SCTP_INP_RUNLOCK(inp); *optsize = sizeof(int); } break; case SCTP_RECVNXTINFO: if (*optsize < sizeof(int)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_RLOCK(inp); *(int *)optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO); SCTP_INP_RUNLOCK(inp); *optsize = sizeof(int); } break; case SCTP_DEFAULT_SNDINFO: { struct sctp_sndinfo *info; SCTP_CHECK_AND_CAST(info, optval, struct sctp_sndinfo, *optsize); SCTP_FIND_STCB(inp, stcb, info->snd_assoc_id); if (stcb) { info->snd_sid = stcb->asoc.def_send.sinfo_stream; info->snd_flags = stcb->asoc.def_send.sinfo_flags; info->snd_flags &= 0xfff0; info->snd_ppid = 
stcb->asoc.def_send.sinfo_ppid; info->snd_context = stcb->asoc.def_send.sinfo_context; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (info->snd_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); info->snd_sid = inp->def_send.sinfo_stream; info->snd_flags = inp->def_send.sinfo_flags; info->snd_flags &= 0xfff0; info->snd_ppid = inp->def_send.sinfo_ppid; info->snd_context = inp->def_send.sinfo_context; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_sndinfo); } break; } case SCTP_DEFAULT_PRINFO: { struct sctp_default_prinfo *info; SCTP_CHECK_AND_CAST(info, optval, struct sctp_default_prinfo, *optsize); SCTP_FIND_STCB(inp, stcb, info->pr_assoc_id); if (stcb) { info->pr_policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags); info->pr_value = stcb->asoc.def_send.sinfo_timetolive; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (info->pr_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); info->pr_policy = PR_SCTP_POLICY(inp->def_send.sinfo_flags); info->pr_value = inp->def_send.sinfo_timetolive; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_default_prinfo); } break; } case SCTP_PEER_ADDR_THLDS: { struct sctp_paddrthlds *thlds; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, *optsize); SCTP_FIND_STCB(inp, stcb, thlds->spt_assoc_id); #if defined(INET) && defined(INET6) if (thlds->spt_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&thlds->spt_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&thlds->spt_address; } } else { addr = (struct sockaddr *)&thlds->spt_address; } #else addr = (struct sockaddr *)&thlds->spt_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. 
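 * The explicit SCTP_INP_DECR_REF() below is therefore only
 * needed when no matching association is found.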
*/ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (stcb != NULL) { if (net != NULL) { thlds->spt_pathmaxrxt = net->failure_threshold; thlds->spt_pathpfthld = net->pf_threshold; thlds->spt_pathcpthld = 0xffff; } else { thlds->spt_pathmaxrxt = stcb->asoc.def_net_failure; thlds->spt_pathpfthld = stcb->asoc.def_net_pf_threshold; thlds->spt_pathcpthld = 0xffff; } thlds->spt_assoc_id = sctp_get_associd(stcb); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (thlds->spt_assoc_id == SCTP_FUTURE_ASSOC))) { /* Use endpoint defaults */ SCTP_INP_RLOCK(inp); thlds->spt_pathmaxrxt = inp->sctp_ep.def_net_failure; thlds->spt_pathpfthld = inp->sctp_ep.def_net_pf_threshold; thlds->spt_pathcpthld = 0xffff; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_paddrthlds); } break; } case SCTP_REMOTE_UDP_ENCAPS_PORT: { struct sctp_udpencaps *encaps; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, *optsize); SCTP_FIND_STCB(inp, stcb, encaps->sue_assoc_id); #if defined(INET) && defined(INET6) if (encaps->sue_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&encaps->sue_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&encaps->sue_address; } } else { addr = (struct sockaddr *)&encaps->sue_address; } #else addr = (struct sockaddr *)&encaps->sue_address; #endif if (stcb) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. 
*/ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (stcb != NULL) { if (net) { encaps->sue_port = net->port; } else { encaps->sue_port = stcb->asoc.port; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (encaps->sue_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); encaps->sue_port = inp->sctp_ep.port; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_udpencaps); } break; } case SCTP_ECN_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.ecn_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->ecn_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_PR_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.prsctp_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->prsctp_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_AUTH_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.auth_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->auth_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; 
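/*
 * SCTP_ECN_SUPPORTED, SCTP_PR_SUPPORTED, SCTP_AUTH_SUPPORTED and the
 * *_SUPPORTED cases that follow all have the same shape: report a boolean
 * feature flag from the association when an stcb is found, otherwise from
 * the endpoint defaults.  A userland caller would typically query them
 * roughly as follows (illustrative sketch only, not part of this file):
 *
 *	struct sctp_assoc_value av = { .assoc_id = SCTP_FUTURE_ASSOC };
 *	socklen_t len = sizeof(av);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_SUPPORTED, &av, &len) == 0)
 *		printf("AUTH supported: %u\n", av.assoc_value);
 */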
} case SCTP_ASCONF_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.asconf_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->asconf_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_RECONFIG_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.reconfig_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->reconfig_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_NRSACK_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.nrsack_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->nrsack_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_PKTDROP_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.pktdrop_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->pktdrop_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_ENABLE_STREAM_RESET: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = (uint32_t)stcb->asoc.local_strreset_support; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = (uint32_t)inp->local_strreset_support; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } 
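/*
 * The two PR-SCTP status queries below report the abandoned-message
 * counters, either for a single outgoing stream (SCTP_PR_STREAM_STATUS)
 * or for the whole association (SCTP_PR_ASSOC_STATUS).  sprstat_policy
 * selects one PR-SCTP policy, or SCTP_PR_SCTP_ALL for the aggregate
 * counters; per-policy counters for individual streams are only kept
 * when the kernel is built with SCTP_DETAILED_STR_STATS, otherwise only
 * SCTP_PR_SCTP_ALL is accepted for the per-stream query.
 */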
case SCTP_PR_STREAM_STATUS: { struct sctp_prstatus *sprstat; uint16_t sid; uint16_t policy; SCTP_CHECK_AND_CAST(sprstat, optval, struct sctp_prstatus, *optsize); SCTP_FIND_STCB(inp, stcb, sprstat->sprstat_assoc_id); sid = sprstat->sprstat_sid; policy = sprstat->sprstat_policy; #if defined(SCTP_DETAILED_STR_STATS) if ((stcb != NULL) && (sid < stcb->asoc.streamoutcnt) && (policy != SCTP_PR_SCTP_NONE) && ((policy <= SCTP_PR_SCTP_MAX) || (policy == SCTP_PR_SCTP_ALL))) { if (policy == SCTP_PR_SCTP_ALL) { sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[0]; sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[0]; } else { sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[policy]; sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[policy]; } #else if ((stcb != NULL) && (sid < stcb->asoc.streamoutcnt) && (policy == SCTP_PR_SCTP_ALL)) { sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[0]; sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[0]; #endif } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } if (error == 0) { *optsize = sizeof(struct sctp_prstatus); } break; } case SCTP_PR_ASSOC_STATUS: { struct sctp_prstatus *sprstat; uint16_t policy; SCTP_CHECK_AND_CAST(sprstat, optval, struct sctp_prstatus, *optsize); SCTP_FIND_STCB(inp, stcb, sprstat->sprstat_assoc_id); policy = sprstat->sprstat_policy; if ((stcb != NULL) && (policy != SCTP_PR_SCTP_NONE) && ((policy <= SCTP_PR_SCTP_MAX) || (policy == SCTP_PR_SCTP_ALL))) { if (policy == SCTP_PR_SCTP_ALL) { sprstat->sprstat_abandoned_unsent = stcb->asoc.abandoned_unsent[0]; sprstat->sprstat_abandoned_sent = stcb->asoc.abandoned_sent[0]; } else { sprstat->sprstat_abandoned_unsent = stcb->asoc.abandoned_unsent[policy]; sprstat->sprstat_abandoned_sent = stcb->asoc.abandoned_sent[policy]; } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } if (error == 0) { *optsize = sizeof(struct sctp_prstatus); } break; } case SCTP_MAX_CWND: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.max_cwnd; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->max_cwnd; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; break; } /* end switch (sopt->sopt_name) */ if (error) { *optsize = 0; } return (error); } static int sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, void *p) { int error, set_opt; uint32_t *mopt; struct sctp_tcb *stcb = NULL; struct sctp_inpcb *inp = NULL; uint32_t vrf_id; if (optval == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } vrf_id = 
inp->def_vrf_id; error = 0; switch (optname) { case SCTP_NODELAY: case SCTP_AUTOCLOSE: case SCTP_AUTO_ASCONF: case SCTP_EXPLICIT_EOR: case SCTP_DISABLE_FRAGMENTS: case SCTP_USE_EXT_RCVINFO: case SCTP_I_WANT_MAPPED_V4_ADDR: /* copy in the option value */ SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); set_opt = 0; if (error) break; switch (optname) { case SCTP_DISABLE_FRAGMENTS: set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; break; case SCTP_AUTO_ASCONF: /* * NOTE: we don't really support this flag */ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* only valid for bound all sockets */ if ((SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) && (*mopt != 0)) { /* forbidden by admin */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPERM); return (EPERM); } set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } break; case SCTP_EXPLICIT_EOR: set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; break; case SCTP_USE_EXT_RCVINFO: set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; break; case SCTP_I_WANT_MAPPED_V4_ADDR: if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } break; case SCTP_NODELAY: set_opt = SCTP_PCB_FLAGS_NODELAY; break; case SCTP_AUTOCLOSE: if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; /* * The value is in ticks. Note this does not effect * old associations, only new ones. */ inp->sctp_ep.auto_close_time = sctp_secs_to_ticks(*mopt); break; } SCTP_INP_WLOCK(inp); if (*mopt != 0) { sctp_feature_on(inp, set_opt); } else { sctp_feature_off(inp, set_opt); } SCTP_INP_WUNLOCK(inp); break; case SCTP_REUSE_PORT: { SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { /* Can't set it after we are bound */ error = EINVAL; break; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { /* Can't do this for a 1-m socket */ error = EINVAL; break; } if (optval) sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); else sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE); break; } case SCTP_PARTIAL_DELIVERY_POINT: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); if (*value > SCTP_SB_LIMIT_RCV(so)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } inp->partial_delivery_point = *value; break; } case SCTP_FRAGMENT_INTERLEAVE: /* not yet until we re-write sctp_recvmsg() */ { uint32_t *level; SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize); if (*level == SCTP_FRAG_LEVEL_2) { sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); } else if (*level == SCTP_FRAG_LEVEL_1) { sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); } else if (*level == SCTP_FRAG_LEVEL_0) { sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_INTERLEAVING_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, 
EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->idata_supported = 0; } else { if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS))) { inp->idata_supported = 1; } else { /* * Must have Frag * interleave and * stream interleave * on */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_CMT_ON_OFF: if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); if (av->assoc_value > SCTP_CMT_MAX) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.sctp_cmt_on_off = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); inp->sctp_cmt_on_off = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.sctp_cmt_on_off = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; } break; case SCTP_PLUGGABLE_CC: { struct sctp_assoc_value *av; struct sctp_nets *net; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); if ((av->assoc_value != SCTP_CC_RFC2581) && (av->assoc_value != SCTP_CC_HSTCP) && (av->assoc_value != SCTP_CC_HTCP) && (av->assoc_value != SCTP_CC_RTCC)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value]; stcb->asoc.congestion_control_module = av->assoc_value; if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); } } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); inp->sctp_ep.sctp_default_cc_module = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value]; stcb->asoc.congestion_control_module = av->assoc_value; if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 
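/* let the newly selected module re-seed the congestion state of each path */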
stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); } } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_CC_OPTION: { struct sctp_cc_option *cc_opt; SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, optsize); SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id); if (stcb == NULL) { if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (cc_opt->aid_value.assoc_id == SCTP_CURRENT_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (stcb->asoc.cc_functions.sctp_cwnd_socket_option) { (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 1, cc_opt); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else { error = EINVAL; } } else { if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) { error = ENOTSUP; } else { error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 1, cc_opt); } SCTP_TCB_UNLOCK(stcb); } break; } case SCTP_STREAM_SCHEDULER: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); if ((av->assoc_value != SCTP_SS_DEFAULT) && (av->assoc_value != SCTP_SS_RR) && (av->assoc_value != SCTP_SS_RR_PKT) && (av->assoc_value != SCTP_SS_PRIO) && (av->assoc_value != SCTP_SS_FB) && (av->assoc_value != SCTP_SS_FCFS)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, true); stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value]; stcb->asoc.stream_scheduling_module = av->assoc_value; stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); inp->sctp_ep.sctp_default_ss_module = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, true); stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value]; stcb->asoc.stream_scheduling_module = av->assoc_value; stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_STREAM_SCHEDULER_VALUE: { struct sctp_stream_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { if ((av->stream_id >= stcb->asoc.streamoutcnt) || (stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], av->stream_value) < 0)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_CURRENT_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (av->stream_id < stcb->asoc.streamoutcnt) { stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], av->stream_value); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else { /* * Can't set stream value without * association */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 
SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_CLR_STAT_LOG: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; break; case SCTP_CONTEXT: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.context = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); inp->sctp_context = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.context = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_VRF_ID: { uint32_t *default_vrfid; SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); if (*default_vrfid > SCTP_MAX_VRF_ID) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } inp->def_vrf_id = *default_vrfid; break; } case SCTP_DEL_VRF_ID: { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; break; } case SCTP_ADD_VRF_ID: { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; break; } case SCTP_DELAYED_SACK: { struct sctp_sack_info *sack; SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); if (sack->sack_delay) { if (sack->sack_delay > SCTP_MAX_SACK_DELAY) { error = EINVAL; if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } break; } } if (stcb) { if (sack->sack_delay) { stcb->asoc.delayed_ack = sack->sack_delay; } if (sack->sack_freq) { stcb->asoc.sack_freq = sack->sack_freq; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((sack->sack_assoc_id == SCTP_FUTURE_ASSOC) || (sack->sack_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); if (sack->sack_delay) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = sctp_msecs_to_ticks(sack->sack_delay); } if (sack->sack_freq) { inp->sctp_ep.sctp_sack_freq = sack->sack_freq; } SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((sack->sack_assoc_id == SCTP_CURRENT_ASSOC) || (sack->sack_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (sack->sack_delay) { stcb->asoc.delayed_ack = sack->sack_delay; } if (sack->sack_freq) { stcb->asoc.sack_freq = sack->sack_freq; } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_AUTH_CHUNK: { struct sctp_authchunk *sauth; SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); SCTP_INP_WLOCK(inp); if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { inp->auth_supported = 1; } SCTP_INP_WUNLOCK(inp); break; } case SCTP_AUTH_KEY: { struct sctp_authkey *sca; struct sctp_keyhead *shared_keys; sctp_sharedkey_t *shared_key; sctp_key_t *key = NULL; size_t size; SCTP_CHECK_AND_CAST(sca, optval, 
struct sctp_authkey, optsize); if (sca->sca_keylength == 0) { size = optsize - sizeof(struct sctp_authkey); } else { if (sca->sca_keylength + sizeof(struct sctp_authkey) <= optsize) { size = sca->sca_keylength; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } } SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); if (stcb) { shared_keys = &stcb->asoc.shared_keys; /* clear the cached keys for this key id */ sctp_clear_cachedkeys(stcb, sca->sca_keynumber); /* * create the new shared key and * insert/replace it */ if (size > 0) { key = sctp_set_key(sca->sca_key, (uint32_t)size); if (key == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_TCB_UNLOCK(stcb); break; } } shared_key = sctp_alloc_sharedkey(); if (shared_key == NULL) { sctp_free_key(key); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_TCB_UNLOCK(stcb); break; } shared_key->key = key; shared_key->keyid = sca->sca_keynumber; error = sctp_insert_sharedkey(shared_keys, shared_key); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((sca->sca_assoc_id == SCTP_FUTURE_ASSOC) || (sca->sca_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); shared_keys = &inp->sctp_ep.shared_keys; /* * clear the cached keys on all * assocs for this key id */ sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); /* * create the new shared key and * insert/replace it */ if (size > 0) { key = sctp_set_key(sca->sca_key, (uint32_t)size); if (key == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_INP_WUNLOCK(inp); break; } } shared_key = sctp_alloc_sharedkey(); if (shared_key == NULL) { sctp_free_key(key); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_INP_WUNLOCK(inp); break; } shared_key->key = key; shared_key->keyid = sca->sca_keynumber; error = sctp_insert_sharedkey(shared_keys, shared_key); SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((sca->sca_assoc_id == SCTP_CURRENT_ASSOC) || (sca->sca_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); shared_keys = &stcb->asoc.shared_keys; /* * clear the cached keys for * this key id */ sctp_clear_cachedkeys(stcb, sca->sca_keynumber); /* * create the new shared key * and insert/replace it */ if (size > 0) { key = sctp_set_key(sca->sca_key, (uint32_t)size); if (key == NULL) { SCTP_TCB_UNLOCK(stcb); continue; } } shared_key = sctp_alloc_sharedkey(); if (shared_key == NULL) { sctp_free_key(key); SCTP_TCB_UNLOCK(stcb); continue; } shared_key->key = key; shared_key->keyid = sca->sca_keynumber; error = sctp_insert_sharedkey(shared_keys, shared_key); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_HMAC_IDENT: { struct sctp_hmacalgo *shmac; sctp_hmaclist_t *hmaclist; uint16_t hmacid; uint32_t i; SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); if ((optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) || (shmac->shmac_number_of_idents > 0xffff)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } hmaclist = sctp_alloc_hmaclist((uint16_t)shmac->shmac_number_of_idents); if (hmaclist == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, 
ENOMEM); error = ENOMEM; break; } for (i = 0; i < shmac->shmac_number_of_idents; i++) { hmacid = shmac->shmac_idents[i]; if (sctp_auth_add_hmacid(hmaclist, hmacid)) { /* invalid HMACs were found */ ; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; sctp_free_hmaclist(hmaclist); goto sctp_set_hmac_done; } } for (i = 0; i < hmaclist->num_algo; i++) { if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { /* already in list */ break; } } if (i == hmaclist->num_algo) { /* not found in list */ sctp_free_hmaclist(hmaclist); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } /* set it on the endpoint */ SCTP_INP_WLOCK(inp); if (inp->sctp_ep.local_hmacs) sctp_free_hmaclist(inp->sctp_ep.local_hmacs); inp->sctp_ep.local_hmacs = hmaclist; SCTP_INP_WUNLOCK(inp); sctp_set_hmac_done: break; } case SCTP_AUTH_ACTIVE_KEY: { struct sctp_authkeyid *scact; SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize); SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); /* set the active key on the right place */ if (stcb) { /* set the active key on the assoc */ if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((scact->scact_assoc_id == SCTP_FUTURE_ASSOC) || (scact->scact_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((scact->scact_assoc_id == SCTP_CURRENT_ASSOC) || (scact->scact_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); sctp_auth_setactivekey(stcb, scact->scact_keynumber); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_AUTH_DELETE_KEY: { struct sctp_authkeyid *scdel; SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize); SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); /* delete the key from the right place */ if (stcb) { if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((scdel->scact_assoc_id == SCTP_FUTURE_ASSOC) || (scdel->scact_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((scdel->scact_assoc_id == SCTP_CURRENT_ASSOC) || (scdel->scact_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); sctp_delete_sharedkey(stcb, scdel->scact_keynumber); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_AUTH_DEACTIVATE_KEY: { struct sctp_authkeyid *keyid; SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, optsize); SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id); /* deactivate the key from the right place */ if (stcb) { 
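/*
 * Deactivate the key on the association itself; a non-zero return from
 * sctp_deact_sharedkey() means the key number cannot be deactivated here
 * (for instance an unknown key number), which is reported as EINVAL below.
 */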
if (sctp_deact_sharedkey(stcb, keyid->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((keyid->scact_assoc_id == SCTP_FUTURE_ASSOC) || (keyid->scact_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); if (sctp_deact_sharedkey_ep(inp, keyid->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((keyid->scact_assoc_id == SCTP_CURRENT_ASSOC) || (keyid->scact_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); sctp_deact_sharedkey(stcb, keyid->scact_keynumber); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_ENABLE_STREAM_RESET: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); if (av->assoc_value & (~SCTP_ENABLE_VALUE_MASK)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.local_strreset_support = (uint8_t)av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); inp->local_strreset_support = (uint8_t)av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.local_strreset_support = (uint8_t)av->assoc_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_RESET_STREAMS: { struct sctp_reset_streams *strrst; int i, send_out = 0; int send_in = 0; SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_reset_streams, optsize); SCTP_FIND_STCB(inp, stcb, strrst->srs_assoc_id); if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; break; } if (stcb->asoc.reconfig_supported == 0) { /* * Peer does not support the chunk type. 
*/ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; SCTP_TCB_UNLOCK(stcb); break; } if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } if (sizeof(struct sctp_reset_streams) + strrst->srs_number_streams * sizeof(uint16_t) > optsize) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } if (strrst->srs_flags & SCTP_STREAM_RESET_INCOMING) { send_in = 1; if (stcb->asoc.stream_reset_outstanding) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; SCTP_TCB_UNLOCK(stcb); break; } } if (strrst->srs_flags & SCTP_STREAM_RESET_OUTGOING) { send_out = 1; } if ((strrst->srs_number_streams > SCTP_MAX_STREAMS_AT_ONCE_RESET) && send_in) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_TCB_UNLOCK(stcb); break; } if ((send_in == 0) && (send_out == 0)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } for (i = 0; i < strrst->srs_number_streams; i++) { if ((send_in) && (strrst->srs_stream_list[i] >= stcb->asoc.streamincnt)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if ((send_out) && (strrst->srs_stream_list[i] >= stcb->asoc.streamoutcnt)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } } if (error) { SCTP_TCB_UNLOCK(stcb); break; } if (send_out) { int cnt; uint16_t strm; if (strrst->srs_number_streams) { for (i = 0, cnt = 0; i < strrst->srs_number_streams; i++) { strm = strrst->srs_stream_list[i]; if (stcb->asoc.strmout[strm].state == SCTP_STREAM_OPEN) { stcb->asoc.strmout[strm].state = SCTP_STREAM_RESET_PENDING; cnt++; } } } else { /* Its all */ for (i = 0, cnt = 0; i < stcb->asoc.streamoutcnt; i++) { if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN) { stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING; cnt++; } } } } if (send_in) { error = sctp_send_str_reset_req(stcb, strrst->srs_number_streams, strrst->srs_stream_list, send_in, 0, 0, 0, 0, 0); } else { error = sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_LOCKED); } if (error == 0) { sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); } else { /* * For outgoing streams don't report any * problems in sending the request to the * application. XXX: Double check resetting * incoming streams. */ error = 0; } SCTP_TCB_UNLOCK(stcb); break; } case SCTP_ADD_STREAMS: { struct sctp_add_streams *stradd; uint8_t addstream = 0; uint16_t add_o_strmcnt = 0; uint16_t add_i_strmcnt = 0; SCTP_CHECK_AND_CAST(stradd, optval, struct sctp_add_streams, optsize); SCTP_FIND_STCB(inp, stcb, stradd->sas_assoc_id); if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; break; } if (stcb->asoc.reconfig_supported == 0) { /* * Peer does not support the chunk type. 
*/ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; SCTP_TCB_UNLOCK(stcb); break; } if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } if (stcb->asoc.stream_reset_outstanding) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; SCTP_TCB_UNLOCK(stcb); break; } if ((stradd->sas_outstrms == 0) && (stradd->sas_instrms == 0)) { error = EINVAL; goto skip_stuff; } if (stradd->sas_outstrms) { addstream = 1; /* We allocate here */ add_o_strmcnt = stradd->sas_outstrms; if ((((int)add_o_strmcnt) + ((int)stcb->asoc.streamoutcnt)) > 0x0000ffff) { /* You can't have more than 64k */ error = EINVAL; goto skip_stuff; } } if (stradd->sas_instrms) { int cnt; addstream |= 2; /* * We allocate inside * sctp_send_str_reset_req() */ add_i_strmcnt = stradd->sas_instrms; cnt = add_i_strmcnt; cnt += stcb->asoc.streamincnt; if (cnt > 0x0000ffff) { /* You can't have more than 64k */ error = EINVAL; goto skip_stuff; } if (cnt > (int)stcb->asoc.max_inbound_streams) { /* More than you are allowed */ error = EINVAL; goto skip_stuff; } } error = sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, addstream, add_o_strmcnt, add_i_strmcnt, 0); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); skip_stuff: SCTP_TCB_UNLOCK(stcb); break; } case SCTP_RESET_ASSOC: { int i; uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t)*value); if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; break; } if (stcb->asoc.reconfig_supported == 0) { /* * Peer does not support the chunk type. */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; SCTP_TCB_UNLOCK(stcb); break; } if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } if (stcb->asoc.stream_reset_outstanding) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; SCTP_TCB_UNLOCK(stcb); break; } /* * Is there any data pending in the send or sent * queues? */ if (!TAILQ_EMPTY(&stcb->asoc.send_queue) || !TAILQ_EMPTY(&stcb->asoc.sent_queue)) { busy_out: error = EBUSY; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); SCTP_TCB_UNLOCK(stcb); break; } /* Do any streams have data queued? */ for (i = 0; i < stcb->asoc.streamoutcnt; i++) { if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { goto busy_out; } } error = sctp_send_str_reset_req(stcb, 0, NULL, 0, 1, 0, 0, 0, 0); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); break; } case SCTP_CONNECT_X: if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); break; case SCTP_CONNECT_X_DELAYED: if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); break; case SCTP_CONNECT_X_COMPLETE: { struct sockaddr *sa; /* FIXME MT: check correct? 
*/ SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); /* find tcb */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. */ SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; break; } if (stcb->asoc.delayed_connection == 1) { stcb->asoc.delayed_connection = 0; (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); } else { /* * already expired or did not use delayed * connectx */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; } SCTP_TCB_UNLOCK(stcb); break; } case SCTP_MAX_BURST: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.max_burst = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); inp->sctp_ep.max_burst = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.max_burst = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_MAXSEG: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.sctp_frag_point = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); inp->sctp_frag_point = av->assoc_value; SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_EVENTS: { struct sctp_event_subscribe *events; SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); SCTP_INP_WLOCK(inp); if (events->sctp_data_io_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); } if (events->sctp_association_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); } if (events->sctp_address_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); } if (events->sctp_send_failure_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } if (events->sctp_peer_error_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); } else { 
sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); } if (events->sctp_shutdown_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } if (events->sctp_partial_delivery_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); } if (events->sctp_adaptation_layer_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } if (events->sctp_authentication_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); } if (events->sctp_sender_dry_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT); } if (events->sctp_stream_reset_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (events->sctp_association_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT); } if (events->sctp_address_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT); } if (events->sctp_send_failure_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } if (events->sctp_peer_error_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVPEERERR); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVPEERERR); } if (events->sctp_shutdown_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } if (events->sctp_partial_delivery_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT); } if (events->sctp_adaptation_layer_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } if (events->sctp_authentication_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_AUTHEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_AUTHEVNT); } if (events->sctp_sender_dry_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DRYEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DRYEVNT); } if (events->sctp_stream_reset_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } SCTP_TCB_UNLOCK(stcb); } /* * Send up the sender dry event only for 1-to-1 * style sockets. 
*/ if (events->sctp_sender_dry_event) { if (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) != 0) && !SCTP_IS_LISTENING(inp)) { stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb != NULL) { SCTP_TCB_LOCK(stcb); if (TAILQ_EMPTY(&stcb->asoc.send_queue) && TAILQ_EMPTY(&stcb->asoc.sent_queue) && (stcb->asoc.stream_queue_cnt == 0)) { sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); } SCTP_TCB_UNLOCK(stcb); } } } SCTP_INP_WUNLOCK(inp); break; } case SCTP_ADAPTATION_LAYER: { struct sctp_setadaptation *adap_bits; SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); SCTP_INP_WLOCK(inp); inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; inp->sctp_ep.adaptation_layer_indicator_provided = 1; SCTP_INP_WUNLOCK(inp); break; } #ifdef SCTP_DEBUG case SCTP_SET_INITIAL_DBG_SEQ: { uint32_t *vvv; SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); SCTP_INP_WLOCK(inp); inp->sctp_ep.initial_sequence_debug = *vvv; SCTP_INP_WUNLOCK(inp); break; } #endif case SCTP_DEFAULT_SEND_PARAM: { struct sctp_sndrcvinfo *s_info; SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); if (stcb) { if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) { memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((s_info->sinfo_assoc_id == SCTP_FUTURE_ASSOC) || (s_info->sinfo_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((s_info->sinfo_assoc_id == SCTP_CURRENT_ASSOC) || (s_info->sinfo_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) { memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_PEER_ADDR_PARAMS: { struct sctp_paddrparams *paddrp; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); #if defined(INET) && defined(INET6) if (paddrp->spp_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&paddrp->spp_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&paddrp->spp_address; } } else { addr = (struct sockaddr *)&paddrp->spp_address; } #else addr = (struct sockaddr *)&paddrp->spp_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. 
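* In other words, the reference taken below is consumed by a successful lookup and must be dropped by hand if the lookup fails.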
*/ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } /* sanity checks */ if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { if (stcb) SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { if (stcb) SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu > 0) && ((paddrp->spp_pathmtu < SCTP_SMALLEST_PMTU) || (paddrp->spp_pathmtu > SCTP_LARGEST_PMTU))) { if (stcb) SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if (stcb != NULL) { /************************TCB SPECIFIC SET ******************/ if (net != NULL) { /************************NET SPECIFIC SET ******************/ if (paddrp->spp_flags & SPP_HB_DISABLE) { if (((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) && ((net->dest_state & SCTP_ADDR_NOHB) == 0)) { sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); } net->dest_state |= SCTP_ADDR_NOHB; } if (paddrp->spp_flags & SPP_HB_ENABLE) { if (paddrp->spp_hbinterval) { net->heart_beat_delay = paddrp->spp_hbinterval; } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { net->heart_beat_delay = 0; } sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); net->dest_state &= ~SCTP_ADDR_NOHB; } if (paddrp->spp_flags & SPP_HB_DEMAND) { if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SOCKOPT, SCTP_SO_LOCKED); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); } } if (paddrp->spp_flags & SPP_PMTUD_DISABLE) { if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); } net->dest_state |= SCTP_ADDR_NO_PMTUD; if (paddrp->spp_pathmtu > 0) { net->mtu = paddrp->spp_pathmtu; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: net->mtu += SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: net->mtu += SCTP_MIN_OVERHEAD; break; #endif default: break; } if (net->mtu < stcb->asoc.smallest_mtu) { sctp_pathmtu_adjustment(stcb, net->mtu, true); } } } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } net->dest_state &= ~SCTP_ADDR_NO_PMTUD; } if (paddrp->spp_pathmaxrxt > 
0) { if (net->dest_state & SCTP_ADDR_PF) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_PF; } } else { if ((net->error_count <= paddrp->spp_pathmaxrxt) && (net->error_count > net->pf_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); } } if (net->dest_state & SCTP_ADDR_REACHABLE) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { if (net->error_count <= paddrp->spp_pathmaxrxt) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } net->failure_threshold = paddrp->spp_pathmaxrxt; } if (paddrp->spp_flags & SPP_DSCP) { net->dscp = paddrp->spp_dscp & 0xfc; net->dscp |= 0x01; } #ifdef INET6 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { if (net->ro._l_addr.sa.sa_family == AF_INET6) { net->flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff; net->flowlabel |= 0x80000000; } } #endif } else { /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ if (paddrp->spp_pathmaxrxt > 0) { stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->dest_state & SCTP_ADDR_PF) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_PF; } } else { if ((net->error_count <= paddrp->spp_pathmaxrxt) && (net->error_count > net->pf_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_13); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); } } if (net->dest_state & SCTP_ADDR_REACHABLE) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { if (net->error_count <= paddrp->spp_pathmaxrxt) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } net->failure_threshold = paddrp->spp_pathmaxrxt; } } if (paddrp->spp_flags & SPP_HB_ENABLE) { if (paddrp->spp_hbinterval != 0) { stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { stcb->asoc.heart_beat_delay = 0; } /* Turn back on the timer */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (paddrp->spp_hbinterval != 0) { net->heart_beat_delay = paddrp->spp_hbinterval; } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { net->heart_beat_delay = 0; } if (net->dest_state & SCTP_ADDR_NOHB) { net->dest_state &= ~SCTP_ADDR_NOHB; } sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_14); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); } sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } if (paddrp->spp_flags & SPP_HB_DISABLE) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if ((net->dest_state & SCTP_ADDR_NOHB) == 0) { net->dest_state |= SCTP_ADDR_NOHB; if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_15); } } } sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } if 
(paddrp->spp_flags & SPP_PMTUD_DISABLE) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_16); } net->dest_state |= SCTP_ADDR_NO_PMTUD; if (paddrp->spp_pathmtu > 0) { net->mtu = paddrp->spp_pathmtu; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: net->mtu += SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: net->mtu += SCTP_MIN_OVERHEAD; break; #endif default: break; } if (net->mtu < stcb->asoc.smallest_mtu) { sctp_pathmtu_adjustment(stcb, net->mtu, true); } } } if (paddrp->spp_pathmtu > 0) { stcb->asoc.default_mtu = paddrp->spp_pathmtu; } sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } net->dest_state &= ~SCTP_ADDR_NO_PMTUD; } stcb->asoc.default_mtu = 0; sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } if (paddrp->spp_flags & SPP_DSCP) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { net->dscp = paddrp->spp_dscp & 0xfc; net->dscp |= 0x01; } stcb->asoc.default_dscp = paddrp->spp_dscp & 0xfc; stcb->asoc.default_dscp |= 0x01; } #ifdef INET6 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->ro._l_addr.sa.sa_family == AF_INET6) { net->flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff; net->flowlabel |= 0x80000000; } } stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff; stcb->asoc.default_flowlabel |= 0x80000000; } #endif } SCTP_TCB_UNLOCK(stcb); } else { /************************NO TCB, SET TO default stuff ******************/ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (paddrp->spp_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); /* * For the TOS/FLOWLABEL stuff you * set it with the options on the * socket */ if (paddrp->spp_pathmaxrxt > 0) { inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; } if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; else if (paddrp->spp_hbinterval != 0) { if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(paddrp->spp_hbinterval); } if (paddrp->spp_flags & SPP_HB_ENABLE) { if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; } else if (paddrp->spp_hbinterval) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(paddrp->spp_hbinterval); } sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } else if (paddrp->spp_flags & SPP_HB_DISABLE) { sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { inp->sctp_ep.default_mtu = 0; sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } else if (paddrp->spp_flags & SPP_PMTUD_DISABLE) { if (paddrp->spp_pathmtu > 0) { inp->sctp_ep.default_mtu = paddrp->spp_pathmtu; } sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } if (paddrp->spp_flags & SPP_DSCP) { inp->sctp_ep.default_dscp = paddrp->spp_dscp & 0xfc; inp->sctp_ep.default_dscp |= 0x01; } #ifdef INET6 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { if (inp->sctp_flags & 
SCTP_PCB_FLAGS_BOUND_V6) { inp->sctp_ep.default_flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff; inp->sctp_ep.default_flowlabel |= 0x80000000; } } #endif SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_RTOINFO: { struct sctp_rtoinfo *srto; uint32_t new_init, new_min, new_max; SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); if (stcb) { if (srto->srto_initial) new_init = srto->srto_initial; else new_init = stcb->asoc.initial_rto; if (srto->srto_max) new_max = srto->srto_max; else new_max = stcb->asoc.maxrto; if (srto->srto_min) new_min = srto->srto_min; else new_min = stcb->asoc.minrto; if ((new_min <= new_init) && (new_init <= new_max)) { stcb->asoc.initial_rto = new_init; stcb->asoc.maxrto = new_max; stcb->asoc.minrto = new_min; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (srto->srto_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); if (srto->srto_initial) new_init = srto->srto_initial; else new_init = inp->sctp_ep.initial_rto; if (srto->srto_max) new_max = srto->srto_max; else new_max = inp->sctp_ep.sctp_maxrto; if (srto->srto_min) new_min = srto->srto_min; else new_min = inp->sctp_ep.sctp_minrto; if ((new_min <= new_init) && (new_init <= new_max)) { inp->sctp_ep.initial_rto = new_init; inp->sctp_ep.sctp_maxrto = new_max; inp->sctp_ep.sctp_minrto = new_min; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_ASSOCINFO: { struct sctp_assocparams *sasoc; SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); if (sasoc->sasoc_cookie_life > 0) { /* boundary check the cookie life */ if (sasoc->sasoc_cookie_life < SCTP_MIN_COOKIE_LIFE) { sasoc->sasoc_cookie_life = SCTP_MIN_COOKIE_LIFE; } if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; } } if (stcb) { if (sasoc->sasoc_asocmaxrxt > 0) { stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; } if (sasoc->sasoc_cookie_life > 0) { stcb->asoc.cookie_life = sctp_msecs_to_ticks(sasoc->sasoc_cookie_life); } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); if (sasoc->sasoc_asocmaxrxt > 0) { inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; } if (sasoc->sasoc_cookie_life > 0) { inp->sctp_ep.def_cookie_life = sctp_msecs_to_ticks(sasoc->sasoc_cookie_life); } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_INITMSG: { struct sctp_initmsg *sinit; SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); SCTP_INP_WLOCK(inp); if (sinit->sinit_num_ostreams) inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; if (sinit->sinit_max_instreams) inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; if (sinit->sinit_max_attempts) inp->sctp_ep.max_init_times = 
sinit->sinit_max_attempts; if (sinit->sinit_max_init_timeo) inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; SCTP_INP_WUNLOCK(inp); break; } case SCTP_PRIMARY_ADDR: { struct sctp_setprim *spa; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); #if defined(INET) && defined(INET6) if (spa->ssp_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&spa->ssp_addr; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&spa->ssp_addr; } } else { addr = (struct sockaddr *)&spa->ssp_addr; } #else addr = (struct sockaddr *)&spa->ssp_addr; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. */ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net != NULL)) { if (net != stcb->asoc.primary_destination) { if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { /* Ok we need to set it */ if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { if ((stcb->asoc.alternate) && ((net->dest_state & SCTP_ADDR_PF) == 0) && (net->dest_state & SCTP_ADDR_REACHABLE)) { sctp_free_remote_addr(stcb->asoc.alternate); stcb->asoc.alternate = NULL; } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } break; } case SCTP_SET_DYNAMIC_PRIMARY: { union sctp_sockstore *ss; error = priv_check(curthread, PRIV_NETINET_RESERVEDPORT); if (error) break; SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); /* SUPER USER CHECK? 
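* The priv_check(PRIV_NETINET_RESERVEDPORT) call above already gates this option.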
*/ error = sctp_dynamic_set_primary(&ss->sa, vrf_id); break; } case SCTP_SET_PEER_PRIMARY_ADDR: { struct sctp_setpeerprim *sspp; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); if (stcb != NULL) { struct sctp_ifa *ifa; #if defined(INET) && defined(INET6) if (sspp->sspp_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&sspp->sspp_addr; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&sspp->sspp_addr; } } else { addr = (struct sockaddr *)&sspp->sspp_addr; } #else addr = (struct sockaddr *)&sspp->sspp_addr; #endif ifa = sctp_find_ifa_by_addr(addr, stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); if (ifa == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* * Must validate the ifa found is in * our ep */ struct sctp_laddr *laddr; int found = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", __func__); continue; } if ((sctp_is_addr_restricted(stcb, laddr->ifa)) && (!sctp_is_addr_pending(stcb, laddr->ifa))) { continue; } if (laddr->ifa == ifa) { found = 1; break; } } if (!found) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } } else { switch (addr->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } break; } #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } } if (sctp_set_primary_ip_address_sa(stcb, addr) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SOCKOPT, SCTP_SO_LOCKED); out_of_it: SCTP_TCB_UNLOCK(stcb); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_BINDX_ADD_ADDR: { struct sockaddr *sa; struct thread *td; td = (struct thread *)p; SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); #ifdef INET if (sa->sa_family == AF_INET) { if (optsize < sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)sa)->sin_addr)))) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (sa->sa_family == AF_INET6) { if (optsize < sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)sa)->sin6_addr), (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, 
SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; break; } sctp_bindx_add_address(so, inp, sa, vrf_id, &error, p); break; } case SCTP_BINDX_REM_ADDR: { struct sockaddr *sa; struct thread *td; td = (struct thread *)p; SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); #ifdef INET if (sa->sa_family == AF_INET) { if (optsize < sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)sa)->sin_addr)))) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (sa->sa_family == AF_INET6) { if (optsize < sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)sa)->sin6_addr), (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; break; } sctp_bindx_delete_address(inp, sa, vrf_id, &error); break; } case SCTP_EVENT: { struct sctp_event *event; uint32_t event_type; SCTP_CHECK_AND_CAST(event, optval, struct sctp_event, optsize); SCTP_FIND_STCB(inp, stcb, event->se_assoc_id); switch (event->se_type) { case SCTP_ASSOC_CHANGE: event_type = SCTP_PCB_FLAGS_RECVASSOCEVNT; break; case SCTP_PEER_ADDR_CHANGE: event_type = SCTP_PCB_FLAGS_RECVPADDREVNT; break; case SCTP_REMOTE_ERROR: event_type = SCTP_PCB_FLAGS_RECVPEERERR; break; case SCTP_SEND_FAILED: event_type = SCTP_PCB_FLAGS_RECVSENDFAILEVNT; break; case SCTP_SHUTDOWN_EVENT: event_type = SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT; break; case SCTP_ADAPTATION_INDICATION: event_type = SCTP_PCB_FLAGS_ADAPTATIONEVNT; break; case SCTP_PARTIAL_DELIVERY_EVENT: event_type = SCTP_PCB_FLAGS_PDAPIEVNT; break; case SCTP_AUTHENTICATION_EVENT: event_type = SCTP_PCB_FLAGS_AUTHEVNT; break; case SCTP_STREAM_RESET_EVENT: event_type = SCTP_PCB_FLAGS_STREAM_RESETEVNT; break; case SCTP_SENDER_DRY_EVENT: event_type = SCTP_PCB_FLAGS_DRYEVNT; break; case SCTP_NOTIFICATIONS_STOPPED_EVENT: event_type = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP); error = ENOTSUP; break; case SCTP_ASSOC_RESET_EVENT: event_type = SCTP_PCB_FLAGS_ASSOC_RESETEVNT; break; case SCTP_STREAM_CHANGE_EVENT: event_type = SCTP_PCB_FLAGS_STREAM_CHANGEEVNT; break; case SCTP_SEND_FAILED_EVENT: event_type = SCTP_PCB_FLAGS_RECVNSENDFAILEVNT; break; default: event_type = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (event_type > 0) { if (stcb) { if (event->se_on) { sctp_stcb_feature_on(inp, stcb, event_type); if (event_type == SCTP_PCB_FLAGS_DRYEVNT) { if (TAILQ_EMPTY(&stcb->asoc.send_queue) && TAILQ_EMPTY(&stcb->asoc.sent_queue) && (stcb->asoc.stream_queue_cnt == 0)) { sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); } } } else { sctp_stcb_feature_off(inp, stcb, event_type); } SCTP_TCB_UNLOCK(stcb); } else { /* * We don't want to send up a storm * of events, so return an error for * sender dry events */ if ((event_type == SCTP_PCB_FLAGS_DRYEVNT) && (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((event->se_assoc_id == SCTP_ALL_ASSOC) || (event->se_assoc_id == SCTP_CURRENT_ASSOC))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP); error = ENOTSUP; break; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags 
& SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((event->se_assoc_id == SCTP_FUTURE_ASSOC) || (event->se_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); if (event->se_on) { sctp_feature_on(inp, event_type); } else { sctp_feature_off(inp, event_type); } SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((event->se_assoc_id == SCTP_CURRENT_ASSOC) || (event->se_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (event->se_on) { sctp_stcb_feature_on(inp, stcb, event_type); } else { sctp_stcb_feature_off(inp, stcb, event_type); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } } else { if (stcb) { SCTP_TCB_UNLOCK(stcb); } } break; } case SCTP_RECVRCVINFO: { int *onoff; SCTP_CHECK_AND_CAST(onoff, optval, int, optsize); SCTP_INP_WLOCK(inp); if (*onoff != 0) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO); } SCTP_INP_WUNLOCK(inp); break; } case SCTP_RECVNXTINFO: { int *onoff; SCTP_CHECK_AND_CAST(onoff, optval, int, optsize); SCTP_INP_WLOCK(inp); if (*onoff != 0) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO); } SCTP_INP_WUNLOCK(inp); break; } case SCTP_DEFAULT_SNDINFO: { struct sctp_sndinfo *info; uint16_t policy; SCTP_CHECK_AND_CAST(info, optval, struct sctp_sndinfo, optsize); SCTP_FIND_STCB(inp, stcb, info->snd_assoc_id); if (stcb) { if (info->snd_sid < stcb->asoc.streamoutcnt) { stcb->asoc.def_send.sinfo_stream = info->snd_sid; policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags); stcb->asoc.def_send.sinfo_flags = info->snd_flags; stcb->asoc.def_send.sinfo_flags |= policy; stcb->asoc.def_send.sinfo_ppid = info->snd_ppid; stcb->asoc.def_send.sinfo_context = info->snd_context; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((info->snd_assoc_id == SCTP_FUTURE_ASSOC) || (info->snd_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); inp->def_send.sinfo_stream = info->snd_sid; policy = PR_SCTP_POLICY(inp->def_send.sinfo_flags); inp->def_send.sinfo_flags = info->snd_flags; inp->def_send.sinfo_flags |= policy; inp->def_send.sinfo_ppid = info->snd_ppid; inp->def_send.sinfo_context = info->snd_context; SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((info->snd_assoc_id == SCTP_CURRENT_ASSOC) || (info->snd_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (info->snd_sid < stcb->asoc.streamoutcnt) { stcb->asoc.def_send.sinfo_stream = info->snd_sid; policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags); stcb->asoc.def_send.sinfo_flags = info->snd_flags; stcb->asoc.def_send.sinfo_flags |= policy; stcb->asoc.def_send.sinfo_ppid = info->snd_ppid; stcb->asoc.def_send.sinfo_context = info->snd_context; } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_DEFAULT_PRINFO: { struct sctp_default_prinfo *info; SCTP_CHECK_AND_CAST(info, optval, struct sctp_default_prinfo, optsize); SCTP_FIND_STCB(inp, stcb, info->pr_assoc_id); if (info->pr_policy > SCTP_PR_SCTP_MAX) { if (stcb) { SCTP_TCB_UNLOCK(stcb); } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = 
EINVAL; break; } if (stcb) { stcb->asoc.def_send.sinfo_flags &= 0xfff0; stcb->asoc.def_send.sinfo_flags |= info->pr_policy; stcb->asoc.def_send.sinfo_timetolive = info->pr_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((info->pr_assoc_id == SCTP_FUTURE_ASSOC) || (info->pr_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); inp->def_send.sinfo_flags &= 0xfff0; inp->def_send.sinfo_flags |= info->pr_policy; inp->def_send.sinfo_timetolive = info->pr_value; SCTP_INP_WUNLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && ((info->pr_assoc_id == SCTP_CURRENT_ASSOC) || (info->pr_assoc_id == SCTP_ALL_ASSOC))) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.def_send.sinfo_flags &= 0xfff0; stcb->asoc.def_send.sinfo_flags |= info->pr_policy; stcb->asoc.def_send.sinfo_timetolive = info->pr_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_PEER_ADDR_THLDS: /* Applies to the specific association */ { struct sctp_paddrthlds *thlds; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, optsize); SCTP_FIND_STCB(inp, stcb, thlds->spt_assoc_id); #if defined(INET) && defined(INET6) if (thlds->spt_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&thlds->spt_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&thlds->spt_address; } } else { addr = (struct sockaddr *)&thlds->spt_address; } #else addr = (struct sockaddr *)&thlds->spt_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. 
*/ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (thlds->spt_pathcpthld != 0xffff) { if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } if (stcb != NULL) { if (net != NULL) { net->failure_threshold = thlds->spt_pathmaxrxt; net->pf_threshold = thlds->spt_pathpfthld; if (net->dest_state & SCTP_ADDR_PF) { if ((net->error_count > net->failure_threshold) || (net->error_count <= net->pf_threshold)) { net->dest_state &= ~SCTP_ADDR_PF; } } else { if ((net->error_count > net->pf_threshold) && (net->error_count <= net->failure_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_17); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); } } if (net->dest_state & SCTP_ADDR_REACHABLE) { if (net->error_count > net->failure_threshold) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { if (net->error_count <= net->failure_threshold) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } } else { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { net->failure_threshold = thlds->spt_pathmaxrxt; net->pf_threshold = thlds->spt_pathpfthld; if (net->dest_state & SCTP_ADDR_PF) { if ((net->error_count > net->failure_threshold) || (net->error_count <= net->pf_threshold)) { net->dest_state &= ~SCTP_ADDR_PF; } } else { if ((net->error_count > net->pf_threshold) && (net->error_count <= net->failure_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_18); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); } } if (net->dest_state & SCTP_ADDR_REACHABLE) { if (net->error_count > net->failure_threshold) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { if (net->error_count <= net->failure_threshold) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } } stcb->asoc.def_net_failure = thlds->spt_pathmaxrxt; stcb->asoc.def_net_pf_threshold = thlds->spt_pathpfthld; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (thlds->spt_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); 
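/* No specific association: record the thresholds as endpoint defaults. */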
inp->sctp_ep.def_net_failure = thlds->spt_pathmaxrxt; inp->sctp_ep.def_net_pf_threshold = thlds->spt_pathpfthld; SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_REMOTE_UDP_ENCAPS_PORT: { struct sctp_udpencaps *encaps; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, optsize); SCTP_FIND_STCB(inp, stcb, encaps->sue_assoc_id); #if defined(INET) && defined(INET6) if (encaps->sue_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&encaps->sue_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&encaps->sue_address; } } else { addr = (struct sockaddr *)&encaps->sue_address; } #else addr = (struct sockaddr *)&encaps->sue_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() wil do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. */ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (stcb != NULL) { if (net != NULL) { net->port = encaps->sue_port; } else { stcb->asoc.port = encaps->sue_port; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (encaps->sue_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); inp->sctp_ep.port = encaps->sue_port; SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_ECN_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->ecn_supported = 0; } else { inp->ecn_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_PR_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) 
{ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->prsctp_supported = 0; } else { inp->prsctp_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_AUTH_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { if ((av->assoc_value == 0) && (inp->asconf_supported == 1)) { /* * AUTH is required for * ASCONF */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->auth_supported = 0; } else { inp->auth_supported = 1; } SCTP_INP_WUNLOCK(inp); } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_ASCONF_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { if ((av->assoc_value != 0) && (inp->auth_supported == 0)) { /* * AUTH is required for * ASCONF */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->asconf_supported = 0; sctp_auth_delete_chunk(SCTP_ASCONF, inp->sctp_ep.local_auth_chunks); sctp_auth_delete_chunk(SCTP_ASCONF_ACK, inp->sctp_ep.local_auth_chunks); } else { inp->asconf_supported = 1; sctp_auth_add_chunk(SCTP_ASCONF, inp->sctp_ep.local_auth_chunks); sctp_auth_add_chunk(SCTP_ASCONF_ACK, inp->sctp_ep.local_auth_chunks); } SCTP_INP_WUNLOCK(inp); } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_RECONFIG_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->reconfig_supported = 0; } else { inp->reconfig_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_NRSACK_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, 
av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->nrsack_supported = 0; } else { inp->nrsack_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_PKTDROP_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->pktdrop_supported = 0; } else { inp->pktdrop_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_MAX_CWND: { struct sctp_assoc_value *av; struct sctp_nets *net; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.max_cwnd = av->assoc_value; if (stcb->asoc.max_cwnd > 0) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if ((net->cwnd > stcb->asoc.max_cwnd) && (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) { net->cwnd = stcb->asoc.max_cwnd; if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) { net->cwnd = net->mtu - sizeof(struct sctphdr); } } } } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (av->assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); inp->max_cwnd = av->assoc_value; SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_ACCEPT_ZERO_CHECKSUM: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); if ((*value == SCTP_EDMID_NONE) || (*value == SCTP_EDMID_LOWER_LAYER_DTLS)) { SCTP_INP_WLOCK(inp); inp->rcv_edmid = *value; SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; break; } /* end switch (opt) */ return (error); } int sctp_ctloutput(struct socket *so, struct sockopt *sopt) { struct epoch_tracker et; struct sctp_inpcb *inp; void *optval = NULL; void *p; size_t optsize = 0; int error = 0; if ((sopt->sopt_level == SOL_SOCKET) && (sopt->sopt_name == SO_SETFIB)) { inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); return (EINVAL); } SCTP_INP_WLOCK(inp); inp->fibnum = so->so_fibnum; SCTP_INP_WUNLOCK(inp); return (0); } if (sopt->sopt_level != IPPROTO_SCTP) { /* wrong proto level... 
send back up to IP */ #ifdef INET6 if (INP_CHECK_SOCKAF(so, AF_INET6)) error = ip6_ctloutput(so, sopt); #endif /* INET6 */ #if defined(INET) && defined(INET6) else #endif #ifdef INET error = ip_ctloutput(so, sopt); #endif return (error); } optsize = sopt->sopt_valsize; if (optsize > SCTP_SOCKET_OPTION_LIMIT) { SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); return (ENOBUFS); } if (optsize) { SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT); if (optval == NULL) { SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); return (ENOBUFS); } error = sooptcopyin(sopt, optval, optsize, optsize); if (error) { SCTP_FREE(optval, SCTP_M_SOCKOPT); goto out; } } p = (void *)sopt->sopt_td; if (sopt->sopt_dir == SOPT_SET) { NET_EPOCH_ENTER(et); error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); NET_EPOCH_EXIT(et); } else if (sopt->sopt_dir == SOPT_GET) { error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); } else { SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if ((error == 0) && (optval != NULL)) { error = sooptcopyout(sopt, optval, optsize); SCTP_FREE(optval, SCTP_M_SOCKOPT); } else if (optval != NULL) { SCTP_FREE(optval, SCTP_M_SOCKOPT); } out: return (error); } #ifdef INET static int sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) { struct epoch_tracker et; int error = 0; int create_lock_on = 0; uint32_t vrf_id; struct sctp_inpcb *inp; struct sctp_tcb *stcb = NULL; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { /* I made the same as TCP since we are not setup? */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } if (addr == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return EINVAL; } switch (addr->sa_family) { #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; if (addr->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } sin6 = (struct sockaddr_in6 *)addr; if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6->sin6_addr)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); return (error); } break; } #endif #ifdef INET case AF_INET: { struct sockaddr_in *sin; if (addr->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } sin = (struct sockaddr_in *)addr; if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sin->sin_addr)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); return (error); } break; } #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT); return (EAFNOSUPPORT); } SCTP_INP_INCR_REF(inp); SCTP_ASOC_CREATE_LOCK(inp); create_lock_on = 1; NET_EPOCH_ENTER(et); if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { /* Should I really unlock ? */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); error = EFAULT; goto out_now; } #ifdef INET6 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && (addr->sa_family == AF_INET6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_now; } #endif if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { /* Bind a ephemeral port */ error = sctp_inpcb_bind(so, NULL, NULL, p); if (error) { goto out_now; } } /* Now do we connect? 
*/ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_now; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); error = EADDRINUSE; goto out_now; } if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); SCTP_INP_RUNLOCK(inp); } else { /* * We increment here since sctp_findassociation_ep_addr() * will do a decrement if it finds the stcb as long as the * locked tcb (last argument) is NOT a TCB.. aka NULL. */ SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } else { SCTP_TCB_UNLOCK(stcb); } } if (stcb != NULL) { /* Already have or am bring up an association */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; goto out_now; } vrf_id = inp->def_vrf_id; /* We are GOOD to go */ stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id, inp->sctp_ep.pre_open_stream_count, inp->sctp_ep.port, p, SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Gak! no memory */ goto out_now; } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); out_now: NET_EPOCH_EXIT(et); if (create_lock_on) { SCTP_ASOC_CREATE_UNLOCK(inp); } SCTP_INP_DECR_REF(inp); return (error); } #endif int sctp_listen(struct socket *so, int backlog, struct thread *p) { /* * Note this module depends on the protocol processing being called * AFTER any socket level flags and backlog are applied to the * socket. The traditional way that the socket flags are applied is * AFTER protocol processing. We have made a change to the * sys/kern/uipc_socket.c module to reverse this but this MUST be in * place if the socket API for SCTP is to work properly. */ int error = 0; struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { /* I made the same as TCP since we are not setup? */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) { /* See if we have a listener */ struct sctp_inpcb *tinp; union sctp_sockstore store; if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* not bound all */ struct sctp_laddr *laddr; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { memcpy(&store, &laddr->ifa->address, sizeof(store)); switch (store.sa.sa_family) { #ifdef INET case AF_INET: store.sin.sin_port = inp->sctp_lport; break; #endif #ifdef INET6 case AF_INET6: store.sin6.sin6_port = inp->sctp_lport; break; #endif default: break; } tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id); if (tinp && (tinp != inp) && ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && (SCTP_IS_LISTENING(tinp))) { /* * we have a listener already and * its not this inp. 
*/ SCTP_INP_DECR_REF(tinp); return (EADDRINUSE); } else if (tinp) { SCTP_INP_DECR_REF(tinp); } } } else { /* Setup a local addr bound all */ memset(&store, 0, sizeof(store)); #ifdef INET6 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { store.sa.sa_family = AF_INET6; store.sa.sa_len = sizeof(struct sockaddr_in6); } #endif #ifdef INET if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { store.sa.sa_family = AF_INET; store.sa.sa_len = sizeof(struct sockaddr_in); } #endif switch (store.sa.sa_family) { #ifdef INET case AF_INET: store.sin.sin_port = inp->sctp_lport; break; #endif #ifdef INET6 case AF_INET6: store.sin6.sin6_port = inp->sctp_lport; break; #endif default: break; } tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id); if (tinp && (tinp != inp) && ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && (SCTP_IS_LISTENING(tinp))) { /* * we have a listener already and its not * this inp. */ SCTP_INP_DECR_REF(tinp); return (EADDRINUSE); } else if (tinp) { SCTP_INP_DECR_REF(tinp); } } } SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); #ifdef SCTP_LOCK_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); } #endif if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* * The unlucky case - We are in the tcp pool with this guy. * - Someone else is in the main inp slot. - We must move * this guy (the listener) to the main slot - We must then * move the guy that was listener to the TCP Pool. */ if (sctp_swap_inpcb_for_listen(inp)) { error = EADDRINUSE; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); goto out; } } SOCK_LOCK(so); error = solisten_proto_check(so); if (error) { SOCK_UNLOCK(so); goto out; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { SOCK_UNLOCK(so); solisten_proto_abort(so); error = EADDRINUSE; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); goto out; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) || (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED))) { SOCK_UNLOCK(so); solisten_proto_abort(so); error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); goto out; } if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { if ((error = sctp_inpcb_bind_locked(inp, NULL, NULL, p))) { SOCK_UNLOCK(so); solisten_proto_abort(so); /* bind error, probably perm */ goto out; } } if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) { solisten_proto(so, backlog); SOCK_UNLOCK(so); inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING; } else { solisten_proto_abort(so); SOCK_UNLOCK(so); if (backlog > 0) { inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING; } else { inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING; } } out: SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (error); } static int sctp_defered_wakeup_cnt = 0; int sctp_accept(struct socket *so, struct sockaddr *sa) { struct sctp_tcb *stcb; struct sctp_inpcb *inp; union sctp_sockstore store; #ifdef INET6 int error; #endif inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } SCTP_INP_WLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { SCTP_INP_WUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); return (EOPNOTSUPP); } if (so->so_state & 
SS_ISDISCONNECTED) { SCTP_INP_WUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED); return (ECONNABORTED); } stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_WUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } SCTP_TCB_LOCK(stcb); store = stcb->asoc.primary_destination->ro._l_addr; SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); /* Wake any delayed sleep action */ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; SOCKBUF_LOCK(&inp->sctp_socket->so_snd); if (sowriteable(inp->sctp_socket)) { sowwakeup_locked(inp->sctp_socket); } else { SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); } } if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); if (soreadable(inp->sctp_socket)) { sctp_defered_wakeup_cnt++; sorwakeup_locked(inp->sctp_socket); } else { SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); } } } SCTP_INP_WUNLOCK(inp); if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_19); } else { SCTP_TCB_UNLOCK(stcb); } switch (store.sa.sa_family) { #ifdef INET case AF_INET: *(struct sockaddr_in *)sa = (struct sockaddr_in ){ .sin_family = AF_INET, .sin_len = sizeof(struct sockaddr_in), .sin_port = store.sin.sin_port, .sin_addr = store.sin.sin_addr, }; break; #endif #ifdef INET6 case AF_INET6: *(struct sockaddr_in6 *)sa = (struct sockaddr_in6 ){ .sin6_family = AF_INET6, .sin6_len = sizeof(struct sockaddr_in6), .sin6_port = store.sin6.sin6_port, .sin6_addr = store.sin6.sin6_addr, }; if ((error = sa6_recoverscope((struct sockaddr_in6 *)sa)) != 0) return (error); break; #endif default: /* TSNH */ break; } return (0); } #ifdef INET int -sctp_ingetaddr(struct socket *so, struct sockaddr **addr) +sctp_ingetaddr(struct socket *so, struct sockaddr *sa) { - struct sockaddr_in *sin; + struct sockaddr_in *sin = (struct sockaddr_in *)sa; uint32_t vrf_id; struct sctp_inpcb *inp; struct sctp_ifa *sctp_ifa; - /* - * Do the malloc first in case it blocks. 
- */ - SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); - if (sin == NULL) - return (ENOMEM); - sin->sin_family = AF_INET; - sin->sin_len = sizeof(*sin); + *sin = (struct sockaddr_in ){ + .sin_len = sizeof(struct sockaddr_in), + .sin_family = AF_INET, + }; + inp = (struct sctp_inpcb *)so->so_pcb; if (!inp) { - SCTP_FREE_SONAME(sin); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } SCTP_INP_RLOCK(inp); sin->sin_port = inp->sctp_lport; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { struct sctp_tcb *stcb; struct sockaddr_in *sin_a; struct sctp_nets *net; int fnd; stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { goto notConn; } fnd = 0; sin_a = NULL; SCTP_TCB_LOCK(stcb); TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a = (struct sockaddr_in *)&net->ro._l_addr; if (sin_a == NULL) /* this will make coverity happy */ continue; if (sin_a->sin_family == AF_INET) { fnd = 1; break; } } if ((!fnd) || (sin_a == NULL)) { /* punt */ SCTP_TCB_UNLOCK(stcb); goto notConn; } vrf_id = inp->def_vrf_id; sctp_ifa = sctp_source_address_selection(inp, stcb, (sctp_route_t *)&net->ro, net, 0, vrf_id); if (sctp_ifa) { sin->sin_addr = sctp_ifa->address.sin.sin_addr; sctp_free_ifa(sctp_ifa); } SCTP_TCB_UNLOCK(stcb); } else { /* For the bound all case you get back 0 */ notConn: sin->sin_addr.s_addr = 0; } } else { /* Take the first IPv4 address in the list */ struct sctp_laddr *laddr; int fnd = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa->address.sa.sa_family == AF_INET) { struct sockaddr_in *sin_a; sin_a = &laddr->ifa->address.sin; sin->sin_addr = sin_a->sin_addr; fnd = 1; break; } } if (!fnd) { - SCTP_FREE_SONAME(sin); SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); return (ENOENT); } } SCTP_INP_RUNLOCK(inp); - (*addr) = (struct sockaddr *)sin; + return (0); } int -sctp_peeraddr(struct socket *so, struct sockaddr **addr) +sctp_peeraddr(struct socket *so, struct sockaddr *sa) { - struct sockaddr_in *sin; + struct sockaddr_in *sin = (struct sockaddr_in *)sa; int fnd; struct sockaddr_in *sin_a; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; - /* Do the malloc first in case it blocks. 
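/*
 * Editorial sketch, not part of the patch: the sctp_ingetaddr() and
 * sctp_peeraddr() hunks here stop allocating a sockaddr_in with
 * SCTP_MALLOC_SONAME() and instead fill storage supplied by the caller,
 * which is why every SCTP_FREE_SONAME() on the error paths disappears.
 * A hypothetical in-kernel caller of the new interface looks like this
 * ("ss" and the surrounding error handling are illustrative only).
 */
	struct sockaddr_storage ss;
	int error;

	memset(&ss, 0, sizeof(ss));
	error = sctp_ingetaddr(so, (struct sockaddr *)&ss);	/* bound address */
	if (error == 0)
		error = sctp_peeraddr(so, (struct sockaddr *)&ss);	/* peer address */
	/* No SCTP_FREE_SONAME()/free(): the caller owns &ss. */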
*/ - SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); - if (sin == NULL) - return (ENOMEM); - sin->sin_family = AF_INET; - sin->sin_len = sizeof(*sin); + *sin = (struct sockaddr_in ){ + .sin_len = sizeof(struct sockaddr_in), + .sin_family = AF_INET, + }; inp = (struct sctp_inpcb *)so->so_pcb; if ((inp == NULL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { /* UDP type and listeners will drop out here */ - SCTP_FREE_SONAME(sin); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); return (ENOTCONN); } SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); if (stcb == NULL) { - SCTP_FREE_SONAME(sin); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } fnd = 0; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a = (struct sockaddr_in *)&net->ro._l_addr; if (sin_a->sin_family == AF_INET) { fnd = 1; sin->sin_port = stcb->rport; sin->sin_addr = sin_a->sin_addr; break; } } SCTP_TCB_UNLOCK(stcb); if (!fnd) { /* No IPv4 address */ - SCTP_FREE_SONAME(sin); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); return (ENOENT); } - (*addr) = (struct sockaddr *)sin; + return (0); } #define SCTP_PROTOSW \ .pr_protocol = IPPROTO_SCTP, \ .pr_ctloutput = sctp_ctloutput, \ .pr_abort = sctp_abort, \ .pr_accept = sctp_accept, \ .pr_attach = sctp_attach, \ .pr_bind = sctp_bind, \ .pr_connect = sctp_connect, \ .pr_control = in_control, \ .pr_close = sctp_close, \ .pr_detach = sctp_close, \ .pr_sopoll = sopoll_generic, \ .pr_flush = sctp_flush, \ .pr_disconnect = sctp_disconnect, \ .pr_listen = sctp_listen, \ .pr_peeraddr = sctp_peeraddr, \ .pr_send = sctp_sendm, \ .pr_shutdown = sctp_shutdown, \ .pr_sockaddr = sctp_ingetaddr, \ .pr_sosend = sctp_sosend, \ .pr_soreceive = sctp_soreceive \ struct protosw sctp_seqpacket_protosw = { .pr_type = SOCK_SEQPACKET, .pr_flags = PR_WANTRCVD, SCTP_PROTOSW }; struct protosw sctp_stream_protosw = { .pr_type = SOCK_STREAM, .pr_flags = PR_CONNREQUIRED | PR_WANTRCVD, SCTP_PROTOSW }; #endif diff --git a/sys/netinet/sctp_var.h b/sys/netinet/sctp_var.h index ff6672bd0248..54566e9ac0df 100644 --- a/sys/netinet/sctp_var.h +++ b/sys/netinet/sctp_var.h @@ -1,348 +1,348 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _NETINET_SCTP_VAR_H_ #define _NETINET_SCTP_VAR_H_ #include #if defined(_KERNEL) || defined(__Userspace__) extern struct protosw sctp_seqpacket_protosw, sctp_stream_protosw; #define sctp_feature_on(inp, feature) (inp->sctp_features |= feature) #define sctp_feature_off(inp, feature) (inp->sctp_features &= ~feature) #define sctp_is_feature_on(inp, feature) ((inp->sctp_features & feature) == feature) #define sctp_is_feature_off(inp, feature) ((inp->sctp_features & feature) == 0) #define sctp_stcb_feature_on(inp, stcb, feature) {\ if (stcb) { \ stcb->asoc.sctp_features |= feature; \ } else if (inp) { \ inp->sctp_features |= feature; \ } \ } #define sctp_stcb_feature_off(inp, stcb, feature) {\ if (stcb) { \ stcb->asoc.sctp_features &= ~feature; \ } else if (inp) { \ inp->sctp_features &= ~feature; \ } \ } #define sctp_stcb_is_feature_on(inp, stcb, feature) \ (((stcb != NULL) && \ ((stcb->asoc.sctp_features & feature) == feature)) || \ ((stcb == NULL) && (inp != NULL) && \ ((inp->sctp_features & feature) == feature))) #define sctp_stcb_is_feature_off(inp, stcb, feature) \ (((stcb != NULL) && \ ((stcb->asoc.sctp_features & feature) == 0)) || \ ((stcb == NULL) && (inp != NULL) && \ ((inp->sctp_features & feature) == 0)) || \ ((stcb == NULL) && (inp == NULL))) /* managing mobility_feature in inpcb (by micchie) */ #define sctp_mobility_feature_on(inp, feature) (inp->sctp_mobility_features |= feature) #define sctp_mobility_feature_off(inp, feature) (inp->sctp_mobility_features &= ~feature) #define sctp_is_mobility_feature_on(inp, feature) (inp->sctp_mobility_features & feature) #define sctp_is_mobility_feature_off(inp, feature) ((inp->sctp_mobility_features & feature) == 0) #define sctp_maxspace(sb) (max((sb)->sb_hiwat,SCTP_MINIMAL_RWND)) #define sctp_sbspace(asoc, sb) ((long) ((sctp_maxspace(sb) > (asoc)->sb_cc) ? (sctp_maxspace(sb) - (asoc)->sb_cc) : 0)) #define sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > SCTP_SBAVAIL(sb)) ? (sctp_maxspace(sb) - SCTP_SBAVAIL(sb)) : 0)) #define sctp_sbspace_sub(a,b) (((a) > (b)) ? ((a) - (b)) : 0) /* * I tried to cache the readq entries at one point. But the reality * is that it did not add any performance since this meant we had to * lock the STCB on read. And at that point once you have to do an * extra lock, it really does not matter if the lock is in the ZONE * stuff or in our code. 
Note that this same problem would occur with * an mbuf cache as well so it is not really worth doing, at least * right now :-D */ #ifdef INVARIANTS #define sctp_free_a_readq(_stcb, _readq) { \ if ((_readq)->on_strm_q) \ panic("On strm q stcb:%p readq:%p", (_stcb), (_readq)); \ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \ SCTP_DECR_READQ_COUNT(); \ } #else #define sctp_free_a_readq(_stcb, _readq) { \ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \ SCTP_DECR_READQ_COUNT(); \ } #endif #define sctp_alloc_a_readq(_stcb, _readq) { \ (_readq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_readq), struct sctp_queued_to_read); \ if ((_readq)) { \ SCTP_INCR_READQ_COUNT(); \ } \ } #define sctp_free_a_strmoq(_stcb, _strmoq, _so_locked) { \ if ((_strmoq)->holds_key_ref) { \ sctp_auth_key_release(stcb, sp->auth_keyid, _so_locked); \ (_strmoq)->holds_key_ref = 0; \ } \ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \ SCTP_DECR_STRMOQ_COUNT(); \ } #define sctp_alloc_a_strmoq(_stcb, _strmoq) { \ (_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \ if ((_strmoq)) { \ memset(_strmoq, 0, sizeof(struct sctp_stream_queue_pending)); \ SCTP_INCR_STRMOQ_COUNT(); \ (_strmoq)->holds_key_ref = 0; \ } \ } #define sctp_free_a_chunk(_stcb, _chk, _so_locked) { \ if ((_chk)->holds_key_ref) {\ sctp_auth_key_release((_stcb), (_chk)->auth_keyid, _so_locked); \ (_chk)->holds_key_ref = 0; \ } \ if (_stcb) { \ SCTP_TCB_LOCK_ASSERT((_stcb)); \ if ((_chk)->whoTo) { \ sctp_free_remote_addr((_chk)->whoTo); \ (_chk)->whoTo = NULL; \ } \ if (((_stcb)->asoc.free_chunk_cnt > SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit)) || \ (SCTP_BASE_INFO(ipi_free_chunks) > SCTP_BASE_SYSCTL(sctp_system_free_resc_limit))) { \ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \ SCTP_DECR_CHK_COUNT(); \ } else { \ TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \ (_stcb)->asoc.free_chunk_cnt++; \ atomic_add_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \ } \ } else { \ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \ SCTP_DECR_CHK_COUNT(); \ } \ } #define sctp_alloc_a_chunk(_stcb, _chk) { \ if (TAILQ_EMPTY(&(_stcb)->asoc.free_chunks)) { \ (_chk) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_chunk), struct sctp_tmit_chunk); \ if ((_chk)) { \ SCTP_INCR_CHK_COUNT(); \ (_chk)->whoTo = NULL; \ (_chk)->holds_key_ref = 0; \ } \ } else { \ (_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \ TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \ atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \ (_chk)->holds_key_ref = 0; \ SCTP_STAT_INCR(sctps_cached_chk); \ (_stcb)->asoc.free_chunk_cnt--; \ } \ } #define sctp_free_remote_addr(__net) { \ if ((__net)) { \ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \ RO_NHFREE(&(__net)->ro); \ if ((__net)->src_addr_selected) { \ sctp_free_ifa((__net)->ro._s_addr); \ (__net)->ro._s_addr = NULL; \ } \ (__net)->src_addr_selected = 0; \ (__net)->dest_state &= ~SCTP_ADDR_REACHABLE; \ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \ SCTP_DECR_RADDR_COUNT(); \ } \ } \ } #define sctp_sbfree(ctl, stcb, sb, m) { \ SCTP_SB_DECR(sb, SCTP_BUF_LEN((m))); \ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \ if (((ctl)->do_not_ref_stcb == 0) && stcb) {\ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \ } \ if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \ SCTP_BUF_TYPE(m) != MT_OOBDATA) \ 
atomic_subtract_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \ } #define sctp_sballoc(stcb, sb, m) { \ SCTP_SB_INCR(sb, SCTP_BUF_LEN((m))); \ atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \ if (stcb) { \ atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \ atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \ } \ if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \ SCTP_BUF_TYPE(m) != MT_OOBDATA) \ atomic_add_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \ } #define sctp_ucount_incr(val) { \ val++; \ } #define sctp_ucount_decr(val) { \ if (val > 0) { \ val--; \ } else { \ val = 0; \ } \ } #define sctp_mbuf_crush(data) do { \ struct mbuf *_m; \ _m = (data); \ while (_m && (SCTP_BUF_LEN(_m) == 0)) { \ (data) = SCTP_BUF_NEXT(_m); \ SCTP_BUF_NEXT(_m) = NULL; \ sctp_m_free(_m); \ _m = (data); \ } \ } while (0) #define sctp_flight_size_decrease(tp1) do { \ if (tp1->whoTo->flight_size >= tp1->book_size) \ tp1->whoTo->flight_size -= tp1->book_size; \ else \ tp1->whoTo->flight_size = 0; \ } while (0) #define sctp_flight_size_increase(tp1) do { \ (tp1)->whoTo->flight_size += (tp1)->book_size; \ } while (0) #ifdef SCTP_FS_SPEC_LOG #define sctp_total_flight_decrease(stcb, tp1) do { \ if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \ stcb->asoc.fs_index = 0;\ stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \ stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \ stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \ stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \ stcb->asoc.fslog[stcb->asoc.fs_index].incr = 0; \ stcb->asoc.fslog[stcb->asoc.fs_index].decr = 1; \ stcb->asoc.fs_index++; \ tp1->window_probe = 0; \ if (stcb->asoc.total_flight >= tp1->book_size) { \ stcb->asoc.total_flight -= tp1->book_size; \ if (stcb->asoc.total_flight_count > 0) \ stcb->asoc.total_flight_count--; \ } else { \ stcb->asoc.total_flight = 0; \ stcb->asoc.total_flight_count = 0; \ } \ } while (0) #define sctp_total_flight_increase(stcb, tp1) do { \ if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \ stcb->asoc.fs_index = 0;\ stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \ stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \ stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \ stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \ stcb->asoc.fslog[stcb->asoc.fs_index].incr = 1; \ stcb->asoc.fslog[stcb->asoc.fs_index].decr = 0; \ stcb->asoc.fs_index++; \ (stcb)->asoc.total_flight_count++; \ (stcb)->asoc.total_flight += (tp1)->book_size; \ } while (0) #else #define sctp_total_flight_decrease(stcb, tp1) do { \ tp1->window_probe = 0; \ if (stcb->asoc.total_flight >= tp1->book_size) { \ stcb->asoc.total_flight -= tp1->book_size; \ if (stcb->asoc.total_flight_count > 0) \ stcb->asoc.total_flight_count--; \ } else { \ stcb->asoc.total_flight = 0; \ stcb->asoc.total_flight_count = 0; \ } \ } while (0) #define sctp_total_flight_increase(stcb, tp1) do { \ (stcb)->asoc.total_flight_count++; \ (stcb)->asoc.total_flight += (tp1)->book_size; \ } while (0) #endif #define SCTP_PF_ENABLED(_net) (_net->pf_threshold < _net->failure_threshold) #define SCTP_NET_IS_PF(_net) (_net->pf_threshold < _net->error_count) struct sctp_nets; struct sctp_inpcb; struct sctp_tcb; struct sctphdr; void sctp_close(struct socket *so); void sctp_abort(struct socket *so); int sctp_disconnect(struct socket *so); ipproto_ctlinput_t sctp_ctlinput; int sctp_ctloutput(struct socket *, struct sockopt *); #ifdef INET void sctp_input_with_port(struct mbuf 
*, int, uint16_t); int sctp_input(struct mbuf **, int *, int); #endif void sctp_pathmtu_adjustment(struct sctp_tcb *, uint32_t, bool); void sctp_notify(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *, uint8_t, uint8_t, uint16_t, uint32_t); int sctp_flush(struct socket *, int); int sctp_shutdown(struct socket *); int sctp_bindx(struct socket *, int, struct sockaddr_storage *, int, int, struct proc *); /* can't use sctp_assoc_t here */ int sctp_peeloff(struct socket *, struct socket *, int, caddr_t, int *); -int sctp_ingetaddr(struct socket *, struct sockaddr **); -int sctp_peeraddr(struct socket *, struct sockaddr **); +int sctp_ingetaddr(struct socket *, struct sockaddr *); +int sctp_peeraddr(struct socket *, struct sockaddr *); int sctp_listen(struct socket *, int, struct thread *); int sctp_accept(struct socket *, struct sockaddr *); #endif /* _KERNEL */ #endif /* !_NETINET_SCTP_VAR_H_ */ diff --git a/sys/netinet6/in6.c b/sys/netinet6/in6.c index e61d5e3e45bd..aca98d2b86b2 100644 --- a/sys/netinet6/in6.c +++ b/sys/netinet6/in6.c @@ -1,2766 +1,2752 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $KAME: in6.c,v 1.259 2002/01/21 11:37:50 keiichi Exp $ */ /*- * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef MAC #include #endif /* * struct in6_ifreq and struct ifreq must be type punnable for common members * of ifr_ifru to allow accessors to be shared. */ _Static_assert(offsetof(struct in6_ifreq, ifr_ifru) == offsetof(struct ifreq, ifr_ifru), "struct in6_ifreq and struct ifreq are not type punnable"); VNET_DECLARE(int, icmp6_nodeinfo_oldmcprefix); #define V_icmp6_nodeinfo_oldmcprefix VNET(icmp6_nodeinfo_oldmcprefix) /* * Definitions of some costant IP6 addresses. */ const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; const struct in6_addr in6addr_nodelocal_allnodes = IN6ADDR_NODELOCAL_ALLNODES_INIT; const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT; const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; const struct in6_addr in6addr_linklocal_allv2routers = IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT; const struct in6_addr in6mask0 = IN6MASK0; const struct in6_addr in6mask32 = IN6MASK32; const struct in6_addr in6mask64 = IN6MASK64; const struct in6_addr in6mask96 = IN6MASK96; const struct in6_addr in6mask128 = IN6MASK128; const struct sockaddr_in6 sa6_any = { sizeof(sa6_any), AF_INET6, 0, 0, IN6ADDR_ANY_INIT, 0 }; static int in6_notify_ifa(struct ifnet *, struct in6_ifaddr *, struct in6_aliasreq *, int); static void in6_unlink_ifa(struct in6_ifaddr *, struct ifnet *); static int in6_validate_ifra(struct ifnet *, struct in6_aliasreq *, struct in6_ifaddr *, int); static struct in6_ifaddr *in6_alloc_ifa(struct ifnet *, struct in6_aliasreq *, int flags); static int in6_update_ifa_internal(struct ifnet *, struct in6_aliasreq *, struct in6_ifaddr *, int, int); static int in6_broadcast_ifa(struct ifnet *, struct in6_aliasreq *, struct in6_ifaddr *, int); static void in6_join_proxy_ndp_mc(struct ifnet *, const struct in6_addr *); static void in6_leave_proxy_ndp_mc(struct ifnet *, const struct in6_addr *); #define ifa2ia6(ifa) ((struct in6_ifaddr *)(ifa)) #define ia62ifa(ia6) (&((ia6)->ia_ifa)) static struct sx in6_control_sx; SX_SYSINIT(in6_control_sx, &in6_control_sx, "in6_control"); void in6_newaddrmsg(struct in6_ifaddr *ia, int cmd) { struct rt_addrinfo info; struct ifaddr *ifa; struct sockaddr_dl gateway; int fibnum; ifa = &ia->ia_ifa; /* * Prepare info data for the host route. 
* This code mimics one from ifa_maintain_loopback_route(). */ bzero(&info, sizeof(struct rt_addrinfo)); info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC | RTF_PINNED; info.rti_info[RTAX_DST] = ifa->ifa_addr; info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gateway; link_init_sdl(ifa->ifa_ifp, (struct sockaddr *)&gateway, ifa->ifa_ifp->if_type); if (cmd != RTM_DELETE) info.rti_ifp = V_loif; fibnum = ia62ifa(ia)->ifa_ifp->if_fib; if (cmd == RTM_ADD) { rt_addrmsg(cmd, &ia->ia_ifa, fibnum); rt_routemsg_info(cmd, &info, fibnum); } else if (cmd == RTM_DELETE) { rt_routemsg_info(cmd, &info, fibnum); rt_addrmsg(cmd, &ia->ia_ifa, fibnum); } } int in6_mask2len(struct in6_addr *mask, u_char *lim0) { int x = 0, y; u_char *lim = lim0, *p; /* ignore the scope_id part */ if (lim0 == NULL || lim0 - (u_char *)mask > sizeof(*mask)) lim = (u_char *)mask + sizeof(*mask); for (p = (u_char *)mask; p < lim; x++, p++) { if (*p != 0xff) break; } y = 0; if (p < lim) { for (y = 0; y < 8; y++) { if ((*p & (0x80 >> y)) == 0) break; } } /* * when the limit pointer is given, do a stricter check on the * remaining bits. */ if (p < lim) { if (y != 0 && (*p & (0x00ff >> y)) != 0) return (-1); for (p = p + 1; p < lim; p++) if (*p != 0) return (-1); } return x * 8 + y; } #ifdef COMPAT_FREEBSD32 struct in6_ndifreq32 { char ifname[IFNAMSIZ]; uint32_t ifindex; }; #define SIOCGDEFIFACE32_IN6 _IOWR('i', 86, struct in6_ndifreq32) #endif int in6_control_ioctl(u_long cmd, void *data, struct ifnet *ifp, struct ucred *cred) { struct in6_ifreq *ifr = (struct in6_ifreq *)data; struct in6_ifaddr *ia = NULL; struct in6_aliasreq *ifra = (struct in6_aliasreq *)data; struct sockaddr_in6 *sa6; int error; bool control_locked = false; /* * Compat to make pre-10.x ifconfig(8) operable. */ if (cmd == OSIOCAIFADDR_IN6) { cmd = SIOCAIFADDR_IN6; ifra->ifra_vhid = 0; } switch (cmd) { case SIOCGETSGCNT_IN6: case SIOCGETMIFCNT_IN6: /* * XXX mrt_ioctl has a 3rd, unused, FIB argument in route.c. * We cannot see how that would be needed, so do not adjust the * KPI blindly; more likely should clean up the IPv4 variant. */ return (mrt6_ioctl ? mrt6_ioctl(cmd, data) : EOPNOTSUPP); } switch (cmd) { case SIOCAADDRCTL_POLICY: case SIOCDADDRCTL_POLICY: if (cred != NULL) { error = priv_check_cred(cred, PRIV_NETINET_ADDRCTRL6); if (error) return (error); } return (in6_src_ioctl(cmd, data)); } if (ifp == NULL) return (EOPNOTSUPP); switch (cmd) { case SIOCSNDFLUSH_IN6: case SIOCSPFXFLUSH_IN6: case SIOCSRTRFLUSH_IN6: case SIOCSDEFIFACE_IN6: case SIOCSIFINFO_FLAGS: case SIOCSIFINFO_IN6: if (cred != NULL) { error = priv_check_cred(cred, PRIV_NETINET_ND6); if (error) return (error); } /* FALLTHROUGH */ case OSIOCGIFINFO_IN6: case SIOCGIFINFO_IN6: case SIOCGNBRINFO_IN6: case SIOCGDEFIFACE_IN6: return (nd6_ioctl(cmd, data, ifp)); #ifdef COMPAT_FREEBSD32 case SIOCGDEFIFACE32_IN6: { struct in6_ndifreq ndif; struct in6_ndifreq32 *ndif32; error = nd6_ioctl(SIOCGDEFIFACE_IN6, (caddr_t)&ndif, ifp); if (error) return (error); ndif32 = (struct in6_ndifreq32 *)data; ndif32->ifindex = ndif.ifindex; return (0); } #endif } switch (cmd) { case SIOCSIFPREFIX_IN6: case SIOCDIFPREFIX_IN6: case SIOCAIFPREFIX_IN6: case SIOCCIFPREFIX_IN6: case SIOCSGIFPREFIX_IN6: case SIOCGIFPREFIX_IN6: log(LOG_NOTICE, "prefix ioctls are now invalidated. 
" "please use ifconfig.\n"); return (EOPNOTSUPP); } switch (cmd) { case SIOCSSCOPE6: if (cred != NULL) { error = priv_check_cred(cred, PRIV_NETINET_SCOPE6); if (error) return (error); } /* FALLTHROUGH */ case SIOCGSCOPE6: case SIOCGSCOPE6DEF: return (scope6_ioctl(cmd, data, ifp)); } /* * Find address for this interface, if it exists. * * In netinet code, we have checked ifra_addr in SIOCSIF*ADDR operation * only, and used the first interface address as the target of other * operations (without checking ifra_addr). This was because netinet * code/API assumed at most 1 interface address per interface. * Since IPv6 allows a node to assign multiple addresses * on a single interface, we almost always look and check the * presence of ifra_addr, and reject invalid ones here. * It also decreases duplicated code among SIOC*_IN6 operations. */ switch (cmd) { case SIOCAIFADDR_IN6: case SIOCSIFPHYADDR_IN6: sa6 = &ifra->ifra_addr; break; case SIOCSIFADDR_IN6: case SIOCGIFADDR_IN6: case SIOCSIFDSTADDR_IN6: case SIOCSIFNETMASK_IN6: case SIOCGIFDSTADDR_IN6: case SIOCGIFNETMASK_IN6: case SIOCDIFADDR_IN6: case SIOCGIFPSRCADDR_IN6: case SIOCGIFPDSTADDR_IN6: case SIOCGIFAFLAG_IN6: case SIOCSNDFLUSH_IN6: case SIOCSPFXFLUSH_IN6: case SIOCSRTRFLUSH_IN6: case SIOCGIFALIFETIME_IN6: case SIOCGIFSTAT_IN6: case SIOCGIFSTAT_ICMP6: sa6 = &ifr->ifr_addr; break; case SIOCSIFADDR: case SIOCSIFBRDADDR: case SIOCSIFDSTADDR: case SIOCSIFNETMASK: /* * Although we should pass any non-INET6 ioctl requests * down to driver, we filter some legacy INET requests. * Drivers trust SIOCSIFADDR et al to come from an already * privileged layer, and do not perform any credentials * checks or input validation. */ return (EINVAL); default: sa6 = NULL; break; } if (sa6 && sa6->sin6_family == AF_INET6) { if (sa6->sin6_scope_id != 0) error = sa6_embedscope(sa6, 0); else error = in6_setscope(&sa6->sin6_addr, ifp, NULL); if (error != 0) return (error); if (cred != NULL && (error = prison_check_ip6(cred, &sa6->sin6_addr)) != 0) return (error); sx_xlock(&in6_control_sx); control_locked = true; ia = in6ifa_ifpwithaddr(ifp, &sa6->sin6_addr); } else ia = NULL; switch (cmd) { case SIOCSIFADDR_IN6: case SIOCSIFDSTADDR_IN6: case SIOCSIFNETMASK_IN6: /* * Since IPv6 allows a node to assign multiple addresses * on a single interface, SIOCSIFxxx ioctls are deprecated. */ /* we decided to obsolete this command (20000704) */ error = EINVAL; goto out; case SIOCDIFADDR_IN6: /* * for IPv4, we look for existing in_ifaddr here to allow * "ifconfig if0 delete" to remove the first IPv4 address on * the interface. For IPv6, as the spec allows multiple * interface address from the day one, we consider "remove the * first one" semantics to be not preferable. */ if (ia == NULL) { error = EADDRNOTAVAIL; goto out; } /* FALLTHROUGH */ case SIOCAIFADDR_IN6: /* * We always require users to specify a valid IPv6 address for * the corresponding operation. */ if (ifra->ifra_addr.sin6_family != AF_INET6 || ifra->ifra_addr.sin6_len != sizeof(struct sockaddr_in6)) { error = EAFNOSUPPORT; goto out; } if (cred != NULL) { error = priv_check_cred(cred, (cmd == SIOCDIFADDR_IN6) ? PRIV_NET_DELIFADDR : PRIV_NET_ADDIFADDR); if (error) goto out; } /* FALLTHROUGH */ case SIOCGIFSTAT_IN6: case SIOCGIFSTAT_ICMP6: if (ifp->if_afdata[AF_INET6] == NULL) { error = EPFNOSUPPORT; goto out; } break; case SIOCGIFADDR_IN6: /* This interface is basically deprecated. use SIOCGIFCONF. 
*/ /* FALLTHROUGH */ case SIOCGIFAFLAG_IN6: case SIOCGIFNETMASK_IN6: case SIOCGIFDSTADDR_IN6: case SIOCGIFALIFETIME_IN6: /* must think again about its semantics */ if (ia == NULL) { error = EADDRNOTAVAIL; goto out; } break; } switch (cmd) { case SIOCGIFADDR_IN6: ifr->ifr_addr = ia->ia_addr; if ((error = sa6_recoverscope(&ifr->ifr_addr)) != 0) goto out; break; case SIOCGIFDSTADDR_IN6: if ((ifp->if_flags & IFF_POINTOPOINT) == 0) { error = EINVAL; goto out; } ifr->ifr_dstaddr = ia->ia_dstaddr; if ((error = sa6_recoverscope(&ifr->ifr_dstaddr)) != 0) goto out; break; case SIOCGIFNETMASK_IN6: ifr->ifr_addr = ia->ia_prefixmask; break; case SIOCGIFAFLAG_IN6: ifr->ifr_ifru.ifru_flags6 = ia->ia6_flags; break; case SIOCGIFSTAT_IN6: COUNTER_ARRAY_COPY(((struct in6_ifextra *) ifp->if_afdata[AF_INET6])->in6_ifstat, &ifr->ifr_ifru.ifru_stat, sizeof(struct in6_ifstat) / sizeof(uint64_t)); break; case SIOCGIFSTAT_ICMP6: COUNTER_ARRAY_COPY(((struct in6_ifextra *) ifp->if_afdata[AF_INET6])->icmp6_ifstat, &ifr->ifr_ifru.ifru_icmp6stat, sizeof(struct icmp6_ifstat) / sizeof(uint64_t)); break; case SIOCGIFALIFETIME_IN6: ifr->ifr_ifru.ifru_lifetime = ia->ia6_lifetime; if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) { time_t maxexpire; struct in6_addrlifetime *retlt = &ifr->ifr_ifru.ifru_lifetime; /* * XXX: adjust expiration time assuming time_t is * signed. */ maxexpire = (-1) & ~((time_t)1 << ((sizeof(maxexpire) * 8) - 1)); if (ia->ia6_lifetime.ia6t_vltime < maxexpire - ia->ia6_updatetime) { retlt->ia6t_expire = ia->ia6_updatetime + ia->ia6_lifetime.ia6t_vltime; } else retlt->ia6t_expire = maxexpire; } if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) { time_t maxexpire; struct in6_addrlifetime *retlt = &ifr->ifr_ifru.ifru_lifetime; /* * XXX: adjust expiration time assuming time_t is * signed. */ maxexpire = (-1) & ~((time_t)1 << ((sizeof(maxexpire) * 8) - 1)); if (ia->ia6_lifetime.ia6t_pltime < maxexpire - ia->ia6_updatetime) { retlt->ia6t_preferred = ia->ia6_updatetime + ia->ia6_lifetime.ia6t_pltime; } else retlt->ia6t_preferred = maxexpire; } break; case SIOCAIFADDR_IN6: #ifdef MAC /* Check if a MAC policy disallows setting the IPv6 address. */ error = mac_inet6_check_add_addr(cred, &sa6->sin6_addr, ifp); if (error != 0) goto out; #endif error = in6_addifaddr(ifp, ifra, ia); ia = NULL; break; case SIOCDIFADDR_IN6: in6_purgeifaddr(ia); EVENTHANDLER_INVOKE(ifaddr_event_ext, ifp, &ia->ia_ifa, IFADDR_EVENT_DEL); break; default: if (ifp->if_ioctl == NULL) { error = EOPNOTSUPP; goto out; } error = (*ifp->if_ioctl)(ifp, cmd, data); goto out; } error = 0; out: if (control_locked) sx_xunlock(&in6_control_sx); if (ia != NULL) ifa_free(&ia->ia_ifa); return (error); } int in6_control(struct socket *so, u_long cmd, void *data, struct ifnet *ifp, struct thread *td) { return (in6_control_ioctl(cmd, data, ifp, td ? 
td->td_ucred : NULL)); } static struct in6_multi_mship * in6_joingroup_legacy(struct ifnet *ifp, const struct in6_addr *mcaddr, int *errorp, int delay) { struct in6_multi_mship *imm; int error; imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT); if (imm == NULL) { *errorp = ENOBUFS; return (NULL); } delay = (delay * MLD_FASTHZ) / hz; error = in6_joingroup(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay); if (error) { *errorp = error; free(imm, M_IP6MADDR); return (NULL); } return (imm); } static int in6_solicited_node_maddr(struct in6_addr *maddr, struct ifnet *ifp, const struct in6_addr *base) { int error; bzero(maddr, sizeof(struct in6_addr)); maddr->s6_addr32[0] = IPV6_ADDR_INT32_MLL; maddr->s6_addr32[2] = htonl(1); maddr->s6_addr32[3] = base->s6_addr32[3]; maddr->s6_addr8[12] = 0xff; if ((error = in6_setscope(maddr, ifp, NULL)) != 0) { /* XXX: should not happen */ log(LOG_ERR, "%s: in6_setscope failed\n", __func__); } return error; } /* * Join necessary multicast groups. Factored out from in6_update_ifa(). * This entire work should only be done once, for the default FIB. */ static int in6_update_ifa_join_mc(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *ia, int flags, struct in6_multi **in6m_sol) { char ip6buf[INET6_ADDRSTRLEN]; struct in6_addr mltaddr; struct in6_multi_mship *imm; int delay, error; KASSERT(in6m_sol != NULL, ("%s: in6m_sol is NULL", __func__)); /* Join solicited multicast addr for new host id. */ if ((error = in6_solicited_node_maddr(&mltaddr, ifp, &ifra->ifra_addr.sin6_addr)) != 0) goto cleanup; delay = error = 0; if ((flags & IN6_IFAUPDATE_DADDELAY)) { /* * We need a random delay for DAD on the address being * configured. It also means delaying transmission of the * corresponding MLD report to avoid report collision. * [RFC 4861, Section 6.3.7] */ delay = arc4random() % (MAX_RTR_SOLICITATION_DELAY * hz); } imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay); if (imm == NULL) { nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s " "(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr), if_name(ifp), error)); goto cleanup; } LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain); *in6m_sol = imm->i6mm_maddr; /* * Join link-local all-nodes address. */ mltaddr = in6addr_linklocal_allnodes; if ((error = in6_setscope(&mltaddr, ifp, NULL)) != 0) goto cleanup; /* XXX: should not fail */ imm = in6_joingroup_legacy(ifp, &mltaddr, &error, 0); if (imm == NULL) { nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s " "(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr), if_name(ifp), error)); goto cleanup; } LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain); /* * Join node information group address. */ delay = 0; if ((flags & IN6_IFAUPDATE_DADDELAY)) { /* * The spec does not say anything about delay for this group, * but the same logic should apply. */ delay = arc4random() % (MAX_RTR_SOLICITATION_DELAY * hz); } if (in6_nigroup(ifp, NULL, -1, &mltaddr) == 0) { /* XXX jinmei */ imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay); if (imm == NULL) nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s " "(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr), if_name(ifp), error)); /* XXX not very fatal, go on... 
*/ else LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain); } if (V_icmp6_nodeinfo_oldmcprefix && in6_nigroup_oldmcprefix(ifp, NULL, -1, &mltaddr) == 0) { imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay); if (imm == NULL) nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s " "(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr), if_name(ifp), error)); /* XXX not very fatal, go on... */ else LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain); } /* * Join interface-local all-nodes address. * (ff01::1%ifN, and ff01::%ifN/32) */ mltaddr = in6addr_nodelocal_allnodes; if ((error = in6_setscope(&mltaddr, ifp, NULL)) != 0) goto cleanup; /* XXX: should not fail */ imm = in6_joingroup_legacy(ifp, &mltaddr, &error, 0); if (imm == NULL) { nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s " "(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr), if_name(ifp), error)); goto cleanup; } LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain); cleanup: return (error); } /* * Update parameters of an IPv6 interface address. * If necessary, a new entry is created and linked into address chains. * This function is separated from in6_control(). */ int in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *ia, int flags) { int error, hostIsNew = 0; if ((error = in6_validate_ifra(ifp, ifra, ia, flags)) != 0) return (error); if (ia == NULL) { hostIsNew = 1; if ((ia = in6_alloc_ifa(ifp, ifra, flags)) == NULL) return (ENOBUFS); } error = in6_update_ifa_internal(ifp, ifra, ia, hostIsNew, flags); if (error != 0) { if (hostIsNew != 0) { in6_unlink_ifa(ia, ifp); ifa_free(&ia->ia_ifa); } return (error); } if (hostIsNew) error = in6_broadcast_ifa(ifp, ifra, ia, flags); return (error); } /* * Fill in basic IPv6 address request info. */ void in6_prepare_ifra(struct in6_aliasreq *ifra, const struct in6_addr *addr, const struct in6_addr *mask) { memset(ifra, 0, sizeof(struct in6_aliasreq)); ifra->ifra_addr.sin6_family = AF_INET6; ifra->ifra_addr.sin6_len = sizeof(struct sockaddr_in6); if (addr != NULL) ifra->ifra_addr.sin6_addr = *addr; ifra->ifra_prefixmask.sin6_family = AF_INET6; ifra->ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6); if (mask != NULL) ifra->ifra_prefixmask.sin6_addr = *mask; } static int in6_validate_ifra(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *ia, int flags) { int plen = -1; struct sockaddr_in6 dst6; struct in6_addrlifetime *lt; char ip6buf[INET6_ADDRSTRLEN]; /* Validate parameters */ if (ifp == NULL || ifra == NULL) /* this maybe redundant */ return (EINVAL); /* * The destination address for a p2p link must have a family * of AF_UNSPEC or AF_INET6. */ if ((ifp->if_flags & IFF_POINTOPOINT) != 0 && ifra->ifra_dstaddr.sin6_family != AF_INET6 && ifra->ifra_dstaddr.sin6_family != AF_UNSPEC) return (EAFNOSUPPORT); /* * Validate address */ if (ifra->ifra_addr.sin6_len != sizeof(struct sockaddr_in6) || ifra->ifra_addr.sin6_family != AF_INET6) return (EINVAL); /* * validate ifra_prefixmask. don't check sin6_family, netmask * does not carry fields other than sin6_len. */ if (ifra->ifra_prefixmask.sin6_len > sizeof(struct sockaddr_in6)) return (EINVAL); /* * Because the IPv6 address architecture is classless, we require * users to specify a (non 0) prefix length (mask) for a new address. * We also require the prefix (when specified) mask is valid, and thus * reject a non-consecutive mask. 
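/*
 * Illustrative sketch, not part of the patch: the check that follows
 * rejects a non-contiguous netmask because in6_mask2len() (defined
 * earlier in this file) returns -1 for it, which trips "plen <= 0".
 */
	struct in6_addr mask;
	int plen;

	memset(&mask, 0, sizeof(mask));
	memset(&mask, 0xff, 8);			/* ffff:ffff:ffff:ffff:: */
	plen = in6_mask2len(&mask, NULL);	/* 64 */

	mask.s6_addr[3] = 0x0f;			/* 1-bits no longer contiguous */
	plen = in6_mask2len(&mask, NULL);	/* -1, so the ioctl gets EINVAL */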
*/ if (ia == NULL && ifra->ifra_prefixmask.sin6_len == 0) return (EINVAL); if (ifra->ifra_prefixmask.sin6_len != 0) { plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, (u_char *)&ifra->ifra_prefixmask + ifra->ifra_prefixmask.sin6_len); if (plen <= 0) return (EINVAL); } else { /* * In this case, ia must not be NULL. We just use its prefix * length. */ plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); } /* * If the destination address on a p2p interface is specified, * and the address is a scoped one, validate/set the scope * zone identifier. */ dst6 = ifra->ifra_dstaddr; if ((ifp->if_flags & (IFF_POINTOPOINT|IFF_LOOPBACK)) != 0 && (dst6.sin6_family == AF_INET6)) { struct in6_addr in6_tmp; u_int32_t zoneid; in6_tmp = dst6.sin6_addr; if (in6_setscope(&in6_tmp, ifp, &zoneid)) return (EINVAL); /* XXX: should be impossible */ if (dst6.sin6_scope_id != 0) { if (dst6.sin6_scope_id != zoneid) return (EINVAL); } else /* user omit to specify the ID. */ dst6.sin6_scope_id = zoneid; /* convert into the internal form */ if (sa6_embedscope(&dst6, 0)) return (EINVAL); /* XXX: should be impossible */ } /* Modify original ifra_dstaddr to reflect changes */ ifra->ifra_dstaddr = dst6; /* * The destination address can be specified only for a p2p or a * loopback interface. If specified, the corresponding prefix length * must be 128. */ if (ifra->ifra_dstaddr.sin6_family == AF_INET6) { if ((ifp->if_flags & (IFF_POINTOPOINT|IFF_LOOPBACK)) == 0) { /* XXX: noisy message */ nd6log((LOG_INFO, "in6_update_ifa: a destination can " "be specified for a p2p or a loopback IF only\n")); return (EINVAL); } if (plen != 128) { nd6log((LOG_INFO, "in6_update_ifa: prefixlen should " "be 128 when dstaddr is specified\n")); return (EINVAL); } } /* lifetime consistency check */ lt = &ifra->ifra_lifetime; if (lt->ia6t_pltime > lt->ia6t_vltime) return (EINVAL); if (lt->ia6t_vltime == 0) { /* * the following log might be noisy, but this is a typical * configuration mistake or a tool's bug. */ nd6log((LOG_INFO, "in6_update_ifa: valid lifetime is 0 for %s\n", ip6_sprintf(ip6buf, &ifra->ifra_addr.sin6_addr))); if (ia == NULL) return (0); /* there's nothing to do */ } /* Check prefix mask */ if (ia != NULL && ifra->ifra_prefixmask.sin6_len != 0) { /* * We prohibit changing the prefix length of an existing * address, because * + such an operation should be rare in IPv6, and * + the operation would confuse prefix management. */ if (ia->ia_prefixmask.sin6_len != 0 && in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL) != plen) { nd6log((LOG_INFO, "in6_validate_ifa: the prefix length " "of an existing %s address should not be changed\n", ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr))); return (EINVAL); } } return (0); } /* * Allocate a new ifaddr and link it into chains. */ static struct in6_ifaddr * in6_alloc_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int flags) { struct in6_ifaddr *ia; /* * When in6_alloc_ifa() is called in a process of a received * RA, it is called under an interrupt context. So, we should * call malloc with M_NOWAIT. */ ia = (struct in6_ifaddr *)ifa_alloc(sizeof(*ia), M_NOWAIT); if (ia == NULL) return (NULL); LIST_INIT(&ia->ia6_memberships); /* Initialize the address and masks, and put time stamp */ ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; ia->ia_addr.sin6_family = AF_INET6; ia->ia_addr.sin6_len = sizeof(ia->ia_addr); /* XXX: Can we assign ,sin6_addr and skip the rest? 
*/ ia->ia_addr = ifra->ifra_addr; ia->ia6_createtime = time_uptime; if ((ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) != 0) { /* * Some functions expect that ifa_dstaddr is not * NULL for p2p interfaces. */ ia->ia_ifa.ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr; } else { ia->ia_ifa.ifa_dstaddr = NULL; } /* set prefix mask if any */ ia->ia_ifa.ifa_netmask = (struct sockaddr *)&ia->ia_prefixmask; if (ifra->ifra_prefixmask.sin6_len != 0) { ia->ia_prefixmask.sin6_family = AF_INET6; ia->ia_prefixmask.sin6_len = ifra->ifra_prefixmask.sin6_len; ia->ia_prefixmask.sin6_addr = ifra->ifra_prefixmask.sin6_addr; } ia->ia_ifp = ifp; ifa_ref(&ia->ia_ifa); /* if_addrhead */ IF_ADDR_WLOCK(ifp); CK_STAILQ_INSERT_TAIL(&ifp->if_addrhead, &ia->ia_ifa, ifa_link); IF_ADDR_WUNLOCK(ifp); ifa_ref(&ia->ia_ifa); /* in6_ifaddrhead */ IN6_IFADDR_WLOCK(); CK_STAILQ_INSERT_TAIL(&V_in6_ifaddrhead, ia, ia_link); CK_LIST_INSERT_HEAD(IN6ADDR_HASH(&ia->ia_addr.sin6_addr), ia, ia6_hash); IN6_IFADDR_WUNLOCK(); return (ia); } /* * Update/configure interface address parameters: * * 1) Update lifetime * 2) Update interface metric ad flags * 3) Notify other subsystems */ static int in6_update_ifa_internal(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *ia, int hostIsNew, int flags) { int error; /* update timestamp */ ia->ia6_updatetime = time_uptime; /* * Set lifetimes. We do not refer to ia6t_expire and ia6t_preferred * to see if the address is deprecated or invalidated, but initialize * these members for applications. */ ia->ia6_lifetime = ifra->ifra_lifetime; if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) { ia->ia6_lifetime.ia6t_expire = time_uptime + ia->ia6_lifetime.ia6t_vltime; } else ia->ia6_lifetime.ia6t_expire = 0; if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) { ia->ia6_lifetime.ia6t_preferred = time_uptime + ia->ia6_lifetime.ia6t_pltime; } else ia->ia6_lifetime.ia6t_preferred = 0; /* * backward compatibility - if IN6_IFF_DEPRECATED is set from the * userland, make it deprecated. */ if ((ifra->ifra_flags & IN6_IFF_DEPRECATED) != 0) { ia->ia6_lifetime.ia6t_pltime = 0; ia->ia6_lifetime.ia6t_preferred = time_uptime; } /* * configure address flags. */ ia->ia6_flags = ifra->ifra_flags; /* * Make the address tentative before joining multicast addresses, * so that corresponding MLD responses would not have a tentative * source address. */ ia->ia6_flags &= ~IN6_IFF_DUPLICATED; /* safety */ /* * DAD should be performed for an new address or addresses on * an interface with ND6_IFF_IFDISABLED. */ if (in6if_do_dad(ifp) && (hostIsNew || (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED))) ia->ia6_flags |= IN6_IFF_TENTATIVE; /* notify other subsystems */ error = in6_notify_ifa(ifp, ia, ifra, hostIsNew); return (error); } /* * Do link-level ifa job: * 1) Add lle entry for added address * 2) Notifies routing socket users about new address * 3) join appropriate multicast group * 4) start DAD if enabled */ static int in6_broadcast_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *ia, int flags) { struct in6_multi *in6m_sol; int error = 0; /* Add local address to lltable, if necessary (ex. on p2p link). */ if ((error = nd6_add_ifa_lle(ia)) != 0) { in6_purgeaddr(&ia->ia_ifa); ifa_free(&ia->ia_ifa); return (error); } /* Join necessary multicast groups. 
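/*
 * Worked example (editorial, not part of the patch): the joins performed
 * below start with the solicited-node group built by
 * in6_solicited_node_maddr() above.  For the hypothetical unicast address
 * 2001:db8::abcd:1234 it keeps only the low-order 24 bits and yields
 *
 *	ff02::1:ffcd:1234	(RFC 4291, section 2.7.1)
 *
 * before in6_setscope() embeds the link-local zone of the interface.
 */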
*/ in6m_sol = NULL; if ((ifp->if_flags & IFF_MULTICAST) != 0) { error = in6_update_ifa_join_mc(ifp, ifra, ia, flags, &in6m_sol); if (error != 0) { in6_purgeaddr(&ia->ia_ifa); ifa_free(&ia->ia_ifa); return (error); } } /* Perform DAD, if the address is TENTATIVE. */ if ((ia->ia6_flags & IN6_IFF_TENTATIVE)) { int delay, mindelay, maxdelay; delay = 0; if ((flags & IN6_IFAUPDATE_DADDELAY)) { /* * We need to impose a delay before sending an NS * for DAD. Check if we also needed a delay for the * corresponding MLD message. If we did, the delay * should be larger than the MLD delay (this could be * relaxed a bit, but this simple logic is at least * safe). * XXX: Break data hiding guidelines and look at * state for the solicited multicast group. */ mindelay = 0; if (in6m_sol != NULL && in6m_sol->in6m_state == MLD_REPORTING_MEMBER) { mindelay = in6m_sol->in6m_timer; } maxdelay = MAX_RTR_SOLICITATION_DELAY * hz; if (maxdelay - mindelay == 0) delay = 0; else { delay = (arc4random() % (maxdelay - mindelay)) + mindelay; } } nd6_dad_start((struct ifaddr *)ia, delay); } in6_newaddrmsg(ia, RTM_ADD); ifa_free(&ia->ia_ifa); return (error); } /* * Adds or deletes interface route for p2p ifa. * Returns 0 on success or errno. */ static int in6_handle_dstaddr_rtrequest(int cmd, struct in6_ifaddr *ia) { struct epoch_tracker et; struct ifaddr *ifa = &ia->ia_ifa; int error; /* Prepare gateway */ struct sockaddr_dl_short sdl = { .sdl_family = AF_LINK, .sdl_len = sizeof(struct sockaddr_dl_short), .sdl_type = ifa->ifa_ifp->if_type, .sdl_index = ifa->ifa_ifp->if_index, }; struct sockaddr_in6 dst = { .sin6_family = AF_INET6, .sin6_len = sizeof(struct sockaddr_in6), .sin6_addr = ia->ia_dstaddr.sin6_addr, }; struct rt_addrinfo info = { .rti_ifa = ifa, .rti_ifp = ifa->ifa_ifp, .rti_flags = RTF_PINNED | RTF_HOST, .rti_info = { [RTAX_DST] = (struct sockaddr *)&dst, [RTAX_GATEWAY] = (struct sockaddr *)&sdl, }, }; /* Don't set additional per-gw filters on removal */ NET_EPOCH_ENTER(et); error = rib_handle_ifaddr_info(ifa->ifa_ifp->if_fib, cmd, &info); NET_EPOCH_EXIT(et); return (error); } static bool ifa_is_p2p(struct in6_ifaddr *ia) { int plen; plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); /* XXX */ if ((plen == 128) && (ia->ia_dstaddr.sin6_family == AF_INET6) && !IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr, &ia->ia_dstaddr.sin6_addr)) return (true); return (false); } int in6_addifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *ia) { struct nd_prefixctl pr0; struct nd_prefix *pr; int carp_attached = 0; int error; /* * first, make or update the interface address structure, * and link it to the list. */ if ((error = in6_update_ifa(ifp, ifra, ia, 0)) != 0) goto out; if (ia != NULL) { if (ia->ia_ifa.ifa_carp) (*carp_detach_p)(&ia->ia_ifa, true); ifa_free(&ia->ia_ifa); } if ((ia = in6ifa_ifpwithaddr(ifp, &ifra->ifra_addr.sin6_addr)) == NULL) { /* * this can happen when the user specify the 0 valid * lifetime. */ return (0); } if (ifra->ifra_vhid > 0) { if (carp_attach_p != NULL) error = (*carp_attach_p)(&ia->ia_ifa, ifra->ifra_vhid); else error = EPROTONOSUPPORT; if (error) goto out; else carp_attached = 1; } /* * then, make the prefix on-link on the interface. * XXX: we'd rather create the prefix before the address, but * we need at least one address to install the corresponding * interface route, so we configure the address first. */ /* * convert mask to prefix length (prefixmask has already * been validated in in6_update_ifa(). 
*/ bzero(&pr0, sizeof(pr0)); pr0.ndpr_ifp = ifp; pr0.ndpr_plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, NULL); if (pr0.ndpr_plen == 128) { /* we don't need to install a host route. */ goto aifaddr_out; } pr0.ndpr_prefix = ifra->ifra_addr; /* apply the mask for safety. */ IN6_MASK_ADDR(&pr0.ndpr_prefix.sin6_addr, &ifra->ifra_prefixmask.sin6_addr); /* * XXX: since we don't have an API to set prefix (not address) * lifetimes, we just use the same lifetimes as addresses. * The (temporarily) installed lifetimes can be overridden by * later advertised RAs (when accept_rtadv is non 0), which is * an intended behavior. */ pr0.ndpr_raf_onlink = 1; /* should be configurable? */ pr0.ndpr_raf_auto = ((ifra->ifra_flags & IN6_IFF_AUTOCONF) != 0); pr0.ndpr_vltime = ifra->ifra_lifetime.ia6t_vltime; pr0.ndpr_pltime = ifra->ifra_lifetime.ia6t_pltime; /* add the prefix if not yet. */ if ((pr = nd6_prefix_lookup(&pr0)) == NULL) { /* * nd6_prelist_add will install the corresponding * interface route. */ if ((error = nd6_prelist_add(&pr0, NULL, &pr)) != 0) { if (carp_attached) (*carp_detach_p)(&ia->ia_ifa, false); goto out; } } /* relate the address to the prefix */ if (ia->ia6_ndpr == NULL) { ia->ia6_ndpr = pr; pr->ndpr_addrcnt++; /* * If this is the first autoconf address from the * prefix, create a temporary address as well * (when required). */ if ((ia->ia6_flags & IN6_IFF_AUTOCONF) && V_ip6_use_tempaddr && pr->ndpr_addrcnt == 1) { int e; if ((e = in6_tmpifadd(ia, 1, 0)) != 0) { log(LOG_NOTICE, "in6_control: failed " "to create a temporary address, " "errno=%d\n", e); } } } nd6_prefix_rele(pr); /* * this might affect the status of autoconfigured addresses, * that is, this address might make other addresses detached. */ pfxlist_onlink_check(); aifaddr_out: /* * Try to clear the flag when a new IPv6 address is added * onto an IFDISABLED interface and it succeeds. */ if (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) { struct in6_ndireq nd; memset(&nd, 0, sizeof(nd)); nd.ndi.flags = ND_IFINFO(ifp)->flags; nd.ndi.flags &= ~ND6_IFF_IFDISABLED; if (nd6_ioctl(SIOCSIFINFO_FLAGS, (caddr_t)&nd, ifp) < 0) log(LOG_NOTICE, "SIOCAIFADDR_IN6: " "SIOCSIFINFO_FLAGS for -ifdisabled " "failed."); /* * Ignore failure of clearing the flag intentionally. * The failure means address duplication was detected. */ } error = 0; out: if (ia != NULL) ifa_free(&ia->ia_ifa); return (error); } void in6_purgeaddr(struct ifaddr *ifa) { struct ifnet *ifp = ifa->ifa_ifp; struct in6_ifaddr *ia = (struct in6_ifaddr *) ifa; struct in6_multi_mship *imm; int error; if (ifa->ifa_carp) (*carp_detach_p)(ifa, false); /* * Remove the loopback route to the interface address. * The check for the current setting of "nd6_useloopback" * is not needed. */ if (ia->ia_flags & IFA_RTSELF) { error = ifa_del_loopback_route((struct ifaddr *)ia, (struct sockaddr *)&ia->ia_addr); if (error == 0) ia->ia_flags &= ~IFA_RTSELF; } /* stop DAD processing */ nd6_dad_stop(ifa); /* Leave multicast groups. 
*/ while ((imm = LIST_FIRST(&ia->ia6_memberships)) != NULL) { LIST_REMOVE(imm, i6mm_chain); if (imm->i6mm_maddr != NULL) in6_leavegroup(imm->i6mm_maddr, NULL); free(imm, M_IP6MADDR); } /* Check if we need to remove p2p route */ if ((ia->ia_flags & IFA_ROUTE) && ifa_is_p2p(ia)) { error = in6_handle_dstaddr_rtrequest(RTM_DELETE, ia); if (error != 0) log(LOG_INFO, "%s: err=%d, destination address delete " "failed\n", __func__, error); ia->ia_flags &= ~IFA_ROUTE; } in6_newaddrmsg(ia, RTM_DELETE); in6_unlink_ifa(ia, ifp); } /* * Removes @ia from the corresponding interfaces and unlinks corresponding * prefix if no addresses are using it anymore. */ void in6_purgeifaddr(struct in6_ifaddr *ia) { struct nd_prefix *pr; /* * If the address being deleted is the only one that owns * the corresponding prefix, expire the prefix as well. * XXX: theoretically, we don't have to worry about such * relationship, since we separate the address management * and the prefix management. We do this, however, to provide * as much backward compatibility as possible in terms of * the ioctl operation. * Note that in6_purgeaddr() will decrement ndpr_addrcnt. */ pr = ia->ia6_ndpr; in6_purgeaddr(&ia->ia_ifa); if (pr != NULL && pr->ndpr_addrcnt == 0) { ND6_WLOCK(); nd6_prefix_unlink(pr, NULL); ND6_WUNLOCK(); nd6_prefix_del(pr); } } static void in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp) { char ip6buf[INET6_ADDRSTRLEN]; int remove_lle; IF_ADDR_WLOCK(ifp); CK_STAILQ_REMOVE(&ifp->if_addrhead, &ia->ia_ifa, ifaddr, ifa_link); IF_ADDR_WUNLOCK(ifp); ifa_free(&ia->ia_ifa); /* if_addrhead */ /* * Defer the release of what might be the last reference to the * in6_ifaddr so that it can't be freed before the remainder of the * cleanup. */ IN6_IFADDR_WLOCK(); CK_STAILQ_REMOVE(&V_in6_ifaddrhead, ia, in6_ifaddr, ia_link); CK_LIST_REMOVE(ia, ia6_hash); IN6_IFADDR_WUNLOCK(); /* * Release the reference to the base prefix. There should be a * positive reference. */ remove_lle = 0; if (ia->ia6_ndpr == NULL) { nd6log((LOG_NOTICE, "in6_unlink_ifa: autoconf'ed address " "%s has no prefix\n", ip6_sprintf(ip6buf, IA6_IN6(ia)))); } else { ia->ia6_ndpr->ndpr_addrcnt--; /* Do not delete lles within prefix if refcont != 0 */ if (ia->ia6_ndpr->ndpr_addrcnt == 0) remove_lle = 1; ia->ia6_ndpr = NULL; } nd6_rem_ifa_lle(ia, remove_lle); /* * Also, if the address being removed is autoconf'ed, call * pfxlist_onlink_check() since the release might affect the status of * other (detached) addresses. 
*/ if ((ia->ia6_flags & IN6_IFF_AUTOCONF)) { pfxlist_onlink_check(); } ifa_free(&ia->ia_ifa); /* in6_ifaddrhead */ } /* * Notifies other subsystems about address change/arrival: * 1) Notifies device handler on the first IPv6 address assignment * 2) Handle routing table changes for P2P links and route * 3) Handle routing table changes for address host route */ static int in6_notify_ifa(struct ifnet *ifp, struct in6_ifaddr *ia, struct in6_aliasreq *ifra, int hostIsNew) { int error = 0, ifacount = 0; struct ifaddr *ifa; struct sockaddr_in6 *pdst; char ip6buf[INET6_ADDRSTRLEN]; /* * Give the interface a chance to initialize * if this is its first address, */ if (hostIsNew != 0) { struct epoch_tracker et; NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; ifacount++; } NET_EPOCH_EXIT(et); } if (ifacount <= 1 && ifp->if_ioctl) { error = (*ifp->if_ioctl)(ifp, SIOCSIFADDR, (caddr_t)ia); if (error) goto done; } /* * If a new destination address is specified, scrub the old one and * install the new destination. Note that the interface must be * p2p or loopback. */ pdst = &ifra->ifra_dstaddr; if (pdst->sin6_family == AF_INET6 && !IN6_ARE_ADDR_EQUAL(&pdst->sin6_addr, &ia->ia_dstaddr.sin6_addr)) { if ((ia->ia_flags & IFA_ROUTE) != 0 && (in6_handle_dstaddr_rtrequest(RTM_DELETE, ia) != 0)) { nd6log((LOG_ERR, "in6_update_ifa_internal: failed to " "remove a route to the old destination: %s\n", ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr))); /* proceed anyway... */ } else ia->ia_flags &= ~IFA_ROUTE; ia->ia_dstaddr = *pdst; } /* * If a new destination address is specified for a point-to-point * interface, install a route to the destination as an interface * direct route. * XXX: the logic below rejects assigning multiple addresses on a p2p * interface that share the same destination. */ if (!(ia->ia_flags & IFA_ROUTE) && ifa_is_p2p(ia)) { error = in6_handle_dstaddr_rtrequest(RTM_ADD, ia); if (error) goto done; ia->ia_flags |= IFA_ROUTE; } /* * add a loopback route to self if not exists */ if (!(ia->ia_flags & IFA_RTSELF) && V_nd6_useloopback) { error = ifa_add_loopback_route((struct ifaddr *)ia, (struct sockaddr *)&ia->ia_addr); if (error == 0) ia->ia_flags |= IFA_RTSELF; } done: WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Invoking IPv6 network device address event may sleep"); ifa_ref(&ia->ia_ifa); EVENTHANDLER_INVOKE(ifaddr_event_ext, ifp, &ia->ia_ifa, IFADDR_EVENT_ADD); ifa_free(&ia->ia_ifa); return (error); } /* * Find an IPv6 interface link-local address specific to an interface. * ifaddr is returned referenced. */ struct in6_ifaddr * in6ifa_ifpforlinklocal(struct ifnet *ifp, int ignoreflags) { struct ifaddr *ifa; NET_EPOCH_ASSERT(); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; if (IN6_IS_ADDR_LINKLOCAL(IFA_IN6(ifa))) { if ((((struct in6_ifaddr *)ifa)->ia6_flags & ignoreflags) != 0) continue; ifa_ref(ifa); break; } } return ((struct in6_ifaddr *)ifa); } /* * find the interface address corresponding to a given IPv6 address. * ifaddr is returned referenced if @referenced flag is set. 
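/*
 * Editorial sketch, not part of the patch: in6ifa_ifpforlinklocal() above
 * and in6ifa_ifwithaddr() below (when its "referenced" argument is true)
 * return a referenced ifaddr, so a hypothetical caller pairs the lookup
 * with ifa_free(); the lookup itself runs under the network epoch.
 */
	struct epoch_tracker et;
	struct in6_ifaddr *ia6;

	NET_EPOCH_ENTER(et);
	ia6 = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
	NET_EPOCH_EXIT(et);
	if (ia6 != NULL) {
		/* ... ia6->ia_addr is a usable link-local address of ifp ... */
		ifa_free(&ia6->ia_ifa);
	}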
*/ struct in6_ifaddr * in6ifa_ifwithaddr(const struct in6_addr *addr, uint32_t zoneid, bool referenced) { struct rm_priotracker in6_ifa_tracker; struct in6_ifaddr *ia; IN6_IFADDR_RLOCK(&in6_ifa_tracker); CK_LIST_FOREACH(ia, IN6ADDR_HASH(addr), ia6_hash) { if (IN6_ARE_ADDR_EQUAL(IA6_IN6(ia), addr)) { if (zoneid != 0 && zoneid != ia->ia_addr.sin6_scope_id) continue; if (referenced) ifa_ref(&ia->ia_ifa); break; } } IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return (ia); } /* * find the internet address corresponding to a given interface and address. * ifaddr is returned referenced. */ struct in6_ifaddr * in6ifa_ifpwithaddr(struct ifnet *ifp, const struct in6_addr *addr) { struct epoch_tracker et; struct ifaddr *ifa; NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; if (IN6_ARE_ADDR_EQUAL(addr, IFA_IN6(ifa))) { ifa_ref(ifa); break; } } NET_EPOCH_EXIT(et); return ((struct in6_ifaddr *)ifa); } /* * Find a link-local scoped address on ifp and return it if any. */ struct in6_ifaddr * in6ifa_llaonifp(struct ifnet *ifp) { struct epoch_tracker et; struct sockaddr_in6 *sin6; struct ifaddr *ifa; if (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) return (NULL); NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; sin6 = (struct sockaddr_in6 *)ifa->ifa_addr; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr) || IN6_IS_ADDR_MC_INTFACELOCAL(&sin6->sin6_addr) || IN6_IS_ADDR_MC_NODELOCAL(&sin6->sin6_addr)) break; } NET_EPOCH_EXIT(et); return ((struct in6_ifaddr *)ifa); } /* * Convert IP6 address to printable (loggable) representation. Caller * has to make sure that ip6buf is at least INET6_ADDRSTRLEN long. */ static char digits[] = "0123456789abcdef"; char * ip6_sprintf(char *ip6buf, const struct in6_addr *addr) { int i, cnt = 0, maxcnt = 0, idx = 0, index = 0; char *cp; const u_int16_t *a = (const u_int16_t *)addr; const u_int8_t *d; int dcolon = 0, zero = 0; cp = ip6buf; for (i = 0; i < 8; i++) { if (*(a + i) == 0) { cnt++; if (cnt == 1) idx = i; } else if (maxcnt < cnt) { maxcnt = cnt; index = idx; cnt = 0; } } if (maxcnt < cnt) { maxcnt = cnt; index = idx; } for (i = 0; i < 8; i++) { if (dcolon == 1) { if (*a == 0) { if (i == 7) *cp++ = ':'; a++; continue; } else dcolon = 2; } if (*a == 0) { if (dcolon == 0 && *(a + 1) == 0 && i == index) { if (i == 0) *cp++ = ':'; *cp++ = ':'; dcolon = 1; } else { *cp++ = '0'; *cp++ = ':'; } a++; continue; } d = (const u_char *)a; /* Try to eliminate leading zeros in printout like in :0001. */ zero = 1; *cp = digits[*d >> 4]; if (*cp != '0') { zero = 0; cp++; } *cp = digits[*d++ & 0xf]; if (zero == 0 || (*cp != '0')) { zero = 0; cp++; } *cp = digits[*d >> 4]; if (zero == 0 || (*cp != '0')) { zero = 0; cp++; } *cp++ = digits[*d & 0xf]; *cp++ = ':'; a++; } *--cp = '\0'; return (ip6buf); } int in6_localaddr(struct in6_addr *in6) { struct rm_priotracker in6_ifa_tracker; struct in6_ifaddr *ia; if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_ADDR_LINKLOCAL(in6)) return 1; IN6_IFADDR_RLOCK(&in6_ifa_tracker); CK_STAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) { if (IN6_ARE_MASKED_ADDR_EQUAL(in6, &ia->ia_addr.sin6_addr, &ia->ia_prefixmask.sin6_addr)) { IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return 1; } } IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return (0); } /* * Return 1 if an internet address is for the local host and configured * on one of its interfaces. 
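 *
 * Hypothetical usage sketch (the variable names are illustrative, not
 * from an actual caller in this tree):
 *
 *	if (in6_localip(&ip6->ip6_dst) != 0)
 *		... the destination is one of our configured addresses ...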
*/ int in6_localip(struct in6_addr *in6) { struct rm_priotracker in6_ifa_tracker; struct in6_ifaddr *ia; IN6_IFADDR_RLOCK(&in6_ifa_tracker); CK_LIST_FOREACH(ia, IN6ADDR_HASH(in6), ia6_hash) { if (IN6_ARE_ADDR_EQUAL(in6, &ia->ia_addr.sin6_addr)) { IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return (1); } } IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return (0); } /* * Like in6_localip(), but FIB-aware. */ bool in6_localip_fib(struct in6_addr *in6, uint16_t fib) { struct rm_priotracker in6_ifa_tracker; struct in6_ifaddr *ia; IN6_IFADDR_RLOCK(&in6_ifa_tracker); CK_LIST_FOREACH(ia, IN6ADDR_HASH(in6), ia6_hash) { if (IN6_ARE_ADDR_EQUAL(in6, &ia->ia_addr.sin6_addr) && ia->ia_ifa.ifa_ifp->if_fib == fib) { IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return (true); } } IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return (false); } /* * Return 1 if an internet address is configured on an interface. */ int in6_ifhasaddr(struct ifnet *ifp, struct in6_addr *addr) { struct in6_addr in6; struct ifaddr *ifa; struct in6_ifaddr *ia6; NET_EPOCH_ASSERT(); in6 = *addr; if (in6_clearscope(&in6)) return (0); in6_setscope(&in6, ifp, NULL); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; ia6 = (struct in6_ifaddr *)ifa; if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &in6)) return (1); } return (0); } int in6_is_addr_deprecated(struct sockaddr_in6 *sa6) { struct rm_priotracker in6_ifa_tracker; struct in6_ifaddr *ia; IN6_IFADDR_RLOCK(&in6_ifa_tracker); CK_LIST_FOREACH(ia, IN6ADDR_HASH(&sa6->sin6_addr), ia6_hash) { if (IN6_ARE_ADDR_EQUAL(IA6_IN6(ia), &sa6->sin6_addr)) { if (ia->ia6_flags & IN6_IFF_DEPRECATED) { IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return (1); /* true */ } break; } } IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); return (0); /* false */ } /* * return length of part which dst and src are equal * hard coding... */ int in6_matchlen(struct in6_addr *src, struct in6_addr *dst) { int match = 0; u_char *s = (u_char *)src, *d = (u_char *)dst; u_char *lim = s + 16, r; while (s < lim) if ((r = (*d++ ^ *s++)) != 0) { while (r < 128) { match++; r <<= 1; } break; } else match += 8; return match; } /* XXX: to be scope conscious */ int in6_are_prefix_equal(struct in6_addr *p1, struct in6_addr *p2, int len) { int bytelen, bitlen; /* sanity check */ if (0 > len || len > 128) { log(LOG_ERR, "in6_are_prefix_equal: invalid prefix length(%d)\n", len); return (0); } bytelen = len / 8; bitlen = len % 8; if (bcmp(&p1->s6_addr, &p2->s6_addr, bytelen)) return (0); if (bitlen != 0 && p1->s6_addr[bytelen] >> (8 - bitlen) != p2->s6_addr[bytelen] >> (8 - bitlen)) return (0); return (1); } void in6_prefixlen2mask(struct in6_addr *maskp, int len) { u_char maskarray[8] = {0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff}; int bytelen, bitlen, i; /* sanity check */ if (0 > len || len > 128) { log(LOG_ERR, "in6_prefixlen2mask: invalid prefix length(%d)\n", len); return; } bzero(maskp, sizeof(*maskp)); bytelen = len / 8; bitlen = len % 8; for (i = 0; i < bytelen; i++) maskp->s6_addr[i] = 0xff; if (bitlen) maskp->s6_addr[bytelen] = maskarray[bitlen - 1]; } /* * return the best address out of the same scope. if no address was * found, return the first valid address from designated IF. 
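 *
 * Informal summary added for clarity (commentary, not code from the
 * tree): among the usable addresses on @ifp (not anycast, not
 * tentative/detached, not deprecated), prefer one whose scope equals
 * in6_addrscope(dst) and, among several such, the one with the largest
 * in6_matchlen() against dst; otherwise return the first other usable
 * address, and only as a last resort a deprecated one (when
 * V_ip6_use_deprecated permits).  For a global dst, for example, a
 * global address on @ifp is chosen ahead of a link-local one.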
*/ struct in6_ifaddr * in6_ifawithifp(struct ifnet *ifp, struct in6_addr *dst) { int dst_scope = in6_addrscope(dst), blen = -1, tlen; struct ifaddr *ifa; struct in6_ifaddr *besta = NULL; struct in6_ifaddr *dep[2]; /* last-resort: deprecated */ NET_EPOCH_ASSERT(); dep[0] = dep[1] = NULL; /* * We first look for addresses in the same scope. * If there is one, return it. * If two or more, return one which matches the dst longest. * If none, return one of global addresses assigned other ifs. */ CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST) continue; /* XXX: is there any case to allow anycast? */ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_NOTREADY) continue; /* don't use this interface */ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DETACHED) continue; if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DEPRECATED) { if (V_ip6_use_deprecated) dep[0] = (struct in6_ifaddr *)ifa; continue; } if (dst_scope == in6_addrscope(IFA_IN6(ifa))) { /* * call in6_matchlen() as few as possible */ if (besta) { if (blen == -1) blen = in6_matchlen(&besta->ia_addr.sin6_addr, dst); tlen = in6_matchlen(IFA_IN6(ifa), dst); if (tlen > blen) { blen = tlen; besta = (struct in6_ifaddr *)ifa; } } else besta = (struct in6_ifaddr *)ifa; } } if (besta) return (besta); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST) continue; /* XXX: is there any case to allow anycast? */ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_NOTREADY) continue; /* don't use this interface */ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DETACHED) continue; if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DEPRECATED) { if (V_ip6_use_deprecated) dep[1] = (struct in6_ifaddr *)ifa; continue; } return (struct in6_ifaddr *)ifa; } /* use the last-resort values, that are, deprecated addresses */ if (dep[0]) return dep[0]; if (dep[1]) return dep[1]; return NULL; } /* * perform DAD when interface becomes IFF_UP. */ void in6_if_up(struct ifnet *ifp) { struct epoch_tracker et; struct ifaddr *ifa; struct in6_ifaddr *ia; NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; ia = (struct in6_ifaddr *)ifa; if (ia->ia6_flags & IN6_IFF_TENTATIVE) { /* * The TENTATIVE flag was likely set by hand * beforehand, implicitly indicating the need for DAD. * We may be able to skip the random delay in this * case, but we impose delays just in case. */ nd6_dad_start(ifa, arc4random() % (MAX_RTR_SOLICITATION_DELAY * hz)); } } NET_EPOCH_EXIT(et); /* * special cases, like 6to4, are handled in in6_ifattach */ in6_ifattach(ifp, NULL); } static void in6_ifevent(void *arg __unused, struct ifnet *ifp, int event) { if (event == IFNET_EVENT_UP) in6_if_up(ifp); } static void in6_init(void *arg __unused) { EVENTHANDLER_REGISTER(ifnet_event, in6_ifevent, NULL, EVENTHANDLER_PRI_ANY); } SYSINIT(in6_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, in6_init, NULL); int in6if_do_dad(struct ifnet *ifp) { if ((ifp->if_flags & IFF_LOOPBACK) != 0) return (0); if ((ifp->if_flags & IFF_MULTICAST) == 0) return (0); if ((ND_IFINFO(ifp)->flags & (ND6_IFF_IFDISABLED | ND6_IFF_NO_DAD)) != 0) return (0); return (1); } /* * Calculate max IPv6 MTU through all the interfaces and store it * to in6_maxmtu. 
*/ void in6_setmaxmtu(void) { struct epoch_tracker et; unsigned long maxmtu = 0; struct ifnet *ifp; NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { /* this function can be called during ifnet initialization */ if (!ifp->if_afdata[AF_INET6]) continue; if ((ifp->if_flags & IFF_LOOPBACK) == 0 && IN6_LINKMTU(ifp) > maxmtu) maxmtu = IN6_LINKMTU(ifp); } NET_EPOCH_EXIT(et); if (maxmtu) /* update only when maxmtu is positive */ V_in6_maxmtu = maxmtu; } /* * Provide the length of interface identifiers to be used for the link attached * to the given interface. The length should be defined in "IPv6 over * xxx-link" document. Note that address architecture might also define * the length for a particular set of address prefixes, regardless of the * link type. As clarified in rfc2462bis, those two definitions should be * consistent, and those really are as of August 2004. */ int in6_if2idlen(struct ifnet *ifp) { switch (ifp->if_type) { case IFT_ETHER: /* RFC2464 */ case IFT_PROPVIRTUAL: /* XXX: no RFC. treat it as ether */ case IFT_L2VLAN: /* ditto */ case IFT_BRIDGE: /* bridge(4) only does Ethernet-like links */ case IFT_INFINIBAND: return (64); case IFT_PPP: /* RFC2472 */ return (64); case IFT_FRELAY: /* RFC2590 */ return (64); case IFT_IEEE1394: /* RFC3146 */ return (64); case IFT_GIF: return (64); /* draft-ietf-v6ops-mech-v2-07 */ case IFT_LOOP: return (64); /* XXX: is this really correct? */ default: /* * Unknown link type: * It might be controversial to use the today's common constant * of 64 for these cases unconditionally. For full compliance, * we should return an error in this case. On the other hand, * if we simply miss the standard for the link type or a new * standard is defined for a new link type, the IFID length * is very likely to be the common constant. As a compromise, * we always use the constant, but make an explicit notice * indicating the "unknown" case. */ printf("in6_if2idlen: unknown link type (%d)\n", ifp->if_type); return (64); } } struct in6_llentry { struct llentry base; }; #define IN6_LLTBL_DEFAULT_HSIZE 32 #define IN6_LLTBL_HASH(k, h) \ (((((((k >> 8) ^ k) >> 8) ^ k) >> 8) ^ k) & ((h) - 1)) /* * Do actual deallocation of @lle. */ static void in6_lltable_destroy_lle_unlocked(epoch_context_t ctx) { struct llentry *lle; lle = __containerof(ctx, struct llentry, lle_epoch_ctx); LLE_LOCK_DESTROY(lle); LLE_REQ_DESTROY(lle); free(lle, M_LLTABLE); } /* * Called by LLE_FREE_LOCKED when number of references * drops to zero. 
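 *
 * Informal sketch of the teardown path (added commentary, not code
 * from the tree):
 *
 *	LLE_FREE_LOCKED(lle)
 *	    -> lle->lle_free == in6_lltable_destroy_lle
 *	        -> NET_EPOCH_CALL(in6_lltable_destroy_lle_unlocked, ...)
 *	            -> locks destroyed and free(lle, M_LLTABLE) once the
 *	               current network epoch has drained
 *
 * so lockless readers walking the table under NET_EPOCH_ENTER() never
 * see the entry's memory reused underneath them.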
*/ static void in6_lltable_destroy_lle(struct llentry *lle) { LLE_WUNLOCK(lle); NET_EPOCH_CALL(in6_lltable_destroy_lle_unlocked, &lle->lle_epoch_ctx); } static struct llentry * in6_lltable_new(const struct in6_addr *addr6, u_int flags) { struct in6_llentry *lle; lle = malloc(sizeof(struct in6_llentry), M_LLTABLE, M_NOWAIT | M_ZERO); if (lle == NULL) /* NB: caller generates msg */ return NULL; lle->base.r_l3addr.addr6 = *addr6; lle->base.lle_refcnt = 1; lle->base.lle_free = in6_lltable_destroy_lle; LLE_LOCK_INIT(&lle->base); LLE_REQ_INIT(&lle->base); callout_init(&lle->base.lle_timer, 1); return (&lle->base); } static int in6_lltable_match_prefix(const struct sockaddr *saddr, const struct sockaddr *smask, u_int flags, struct llentry *lle) { const struct in6_addr *addr, *mask, *lle_addr; addr = &((const struct sockaddr_in6 *)saddr)->sin6_addr; mask = &((const struct sockaddr_in6 *)smask)->sin6_addr; lle_addr = &lle->r_l3addr.addr6; if (IN6_ARE_MASKED_ADDR_EQUAL(lle_addr, addr, mask) == 0) return (0); if (lle->la_flags & LLE_IFADDR) { /* * Delete LLE_IFADDR records IFF address & flag matches. * Note that addr is the interface address within prefix * being matched. */ if (IN6_ARE_ADDR_EQUAL(addr, lle_addr) && (flags & LLE_STATIC) != 0) return (1); return (0); } /* flags & LLE_STATIC means deleting both dynamic and static entries */ if ((flags & LLE_STATIC) || !(lle->la_flags & LLE_STATIC)) return (1); return (0); } static void in6_lltable_free_entry(struct lltable *llt, struct llentry *lle) { struct ifnet *ifp __diagused; LLE_WLOCK_ASSERT(lle); KASSERT(llt != NULL, ("lltable is NULL")); /* Unlink entry from table */ if ((lle->la_flags & LLE_LINKED) != 0) { ifp = llt->llt_ifp; IF_AFDATA_WLOCK_ASSERT(ifp); lltable_unlink_entry(llt, lle); } llentry_free(lle); } static int in6_lltable_rtcheck(struct ifnet *ifp, u_int flags, const struct sockaddr *l3addr) { const struct sockaddr_in6 *sin6; struct nhop_object *nh; struct in6_addr dst; uint32_t scopeid; char ip6buf[INET6_ADDRSTRLEN]; int fibnum; NET_EPOCH_ASSERT(); KASSERT(l3addr->sa_family == AF_INET6, ("sin_family %d", l3addr->sa_family)); sin6 = (const struct sockaddr_in6 *)l3addr; in6_splitscope(&sin6->sin6_addr, &dst, &scopeid); fibnum = V_rt_add_addr_allfibs ? RT_DEFAULT_FIB : ifp->if_fib; nh = fib6_lookup(fibnum, &dst, scopeid, NHR_NONE, 0); if (nh && ((nh->nh_flags & NHF_GATEWAY) || nh->nh_ifp != ifp)) { struct ifaddr *ifa; /* * Create an ND6 cache for an IPv6 neighbor * that is not covered by our own prefix. 
*/ ifa = ifaof_ifpforaddr(l3addr, ifp); if (ifa != NULL) { return 0; } log(LOG_INFO, "IPv6 address: \"%s\" is not on the network\n", ip6_sprintf(ip6buf, &sin6->sin6_addr)); return EINVAL; } return 0; } static inline uint32_t in6_lltable_hash_dst(const struct in6_addr *dst, uint32_t hsize) { return (IN6_LLTBL_HASH(dst->s6_addr32[3], hsize)); } static uint32_t in6_lltable_hash(const struct llentry *lle, uint32_t hsize) { return (in6_lltable_hash_dst(&lle->r_l3addr.addr6, hsize)); } static void in6_lltable_fill_sa_entry(const struct llentry *lle, struct sockaddr *sa) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)sa; bzero(sin6, sizeof(*sin6)); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); sin6->sin6_addr = lle->r_l3addr.addr6; } static inline struct llentry * in6_lltable_find_dst(struct lltable *llt, const struct in6_addr *dst) { struct llentry *lle; struct llentries *lleh; u_int hashidx; hashidx = in6_lltable_hash_dst(dst, llt->llt_hsize); lleh = &llt->lle_head[hashidx]; CK_LIST_FOREACH(lle, lleh, lle_next) { if (lle->la_flags & LLE_DELETED) continue; if (IN6_ARE_ADDR_EQUAL(&lle->r_l3addr.addr6, dst)) break; } return (lle); } static void in6_lltable_delete_entry(struct lltable *llt, struct llentry *lle) { lle->la_flags |= LLE_DELETED; /* Leave the solicited multicast group. */ if ((lle->la_flags & LLE_PUB) != 0) in6_leave_proxy_ndp_mc(llt->llt_ifp, &lle->r_l3addr.addr6); EVENTHANDLER_INVOKE(lle_event, lle, LLENTRY_DELETED); #ifdef DIAGNOSTIC log(LOG_INFO, "ifaddr cache = %p is deleted\n", lle); #endif llentry_free(lle); } static struct llentry * in6_lltable_alloc(struct lltable *llt, u_int flags, const struct sockaddr *l3addr) { const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)l3addr; struct ifnet *ifp = llt->llt_ifp; struct llentry *lle; char linkhdr[LLE_MAX_LINKHDR]; size_t linkhdrsize; int lladdr_off; KASSERT(l3addr->sa_family == AF_INET6, ("sin_family %d", l3addr->sa_family)); /* * A route that covers the given address must have * been installed 1st because we are doing a resolution, * verify this. 
*/ if (!(flags & LLE_IFADDR) && in6_lltable_rtcheck(ifp, flags, l3addr) != 0) return (NULL); lle = in6_lltable_new(&sin6->sin6_addr, flags); if (lle == NULL) { log(LOG_INFO, "lla_lookup: new lle malloc failed\n"); return (NULL); } lle->la_flags = flags; if ((flags & LLE_IFADDR) == LLE_IFADDR) { linkhdrsize = LLE_MAX_LINKHDR; if (lltable_calc_llheader(ifp, AF_INET6, IF_LLADDR(ifp), linkhdr, &linkhdrsize, &lladdr_off) != 0) { in6_lltable_free_entry(llt, lle); return (NULL); } lltable_set_entry_addr(ifp, lle, linkhdr, linkhdrsize, lladdr_off); lle->la_flags |= LLE_STATIC; } if ((lle->la_flags & LLE_STATIC) != 0) lle->ln_state = ND6_LLINFO_REACHABLE; return (lle); } static struct llentry * in6_lltable_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3addr) { const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)l3addr; int family = flags >> 16; struct llentry *lle; IF_AFDATA_LOCK_ASSERT(llt->llt_ifp); KASSERT(l3addr->sa_family == AF_INET6, ("sin_family %d", l3addr->sa_family)); KASSERT((flags & (LLE_UNLOCKED | LLE_EXCLUSIVE)) != (LLE_UNLOCKED | LLE_EXCLUSIVE), ("wrong lle request flags: %#x", flags)); lle = in6_lltable_find_dst(llt, &sin6->sin6_addr); if (__predict_false(family != AF_INET6)) lle = llentry_lookup_family(lle, family); if (lle == NULL) return (NULL); if (flags & LLE_UNLOCKED) return (lle); if (flags & LLE_EXCLUSIVE) LLE_WLOCK(lle); else LLE_RLOCK(lle); /* * If the afdata lock is not held, the LLE may have been unlinked while * we were blocked on the LLE lock. Check for this case. */ if (__predict_false((lle->la_flags & LLE_LINKED) == 0)) { if (flags & LLE_EXCLUSIVE) LLE_WUNLOCK(lle); else LLE_RUNLOCK(lle); return (NULL); } return (lle); } static int in6_lltable_dump_entry(struct lltable *llt, struct llentry *lle, struct sysctl_req *wr) { struct ifnet *ifp = llt->llt_ifp; /* XXX stack use */ struct { struct rt_msghdr rtm; struct sockaddr_in6 sin6; /* * ndp.c assumes that sdl is word aligned */ #ifdef __LP64__ uint32_t pad; #endif struct sockaddr_dl sdl; } ndpc; struct sockaddr_dl *sdl; int error; bzero(&ndpc, sizeof(ndpc)); /* skip deleted entries */ if ((lle->la_flags & LLE_DELETED) == LLE_DELETED) return (0); /* Skip if jailed and not a valid IP of the prison. 
*/ lltable_fill_sa_entry(lle, (struct sockaddr *)&ndpc.sin6); if (prison_if(wr->td->td_ucred, (struct sockaddr *)&ndpc.sin6) != 0) return (0); /* * produce a msg made of: * struct rt_msghdr; * struct sockaddr_in6 (IPv6) * struct sockaddr_dl; */ ndpc.rtm.rtm_msglen = sizeof(ndpc); ndpc.rtm.rtm_version = RTM_VERSION; ndpc.rtm.rtm_type = RTM_GET; ndpc.rtm.rtm_flags = RTF_UP; ndpc.rtm.rtm_addrs = RTA_DST | RTA_GATEWAY; sa6_recoverscope(&ndpc.sin6); /* publish */ if (lle->la_flags & LLE_PUB) ndpc.rtm.rtm_flags |= RTF_ANNOUNCE; sdl = &ndpc.sdl; sdl->sdl_family = AF_LINK; sdl->sdl_len = sizeof(*sdl); sdl->sdl_index = ifp->if_index; sdl->sdl_type = ifp->if_type; if ((lle->la_flags & LLE_VALID) == LLE_VALID) { sdl->sdl_alen = ifp->if_addrlen; bcopy(lle->ll_addr, LLADDR(sdl), ifp->if_addrlen); } else { sdl->sdl_alen = 0; bzero(LLADDR(sdl), ifp->if_addrlen); } if (lle->la_expire != 0) ndpc.rtm.rtm_rmx.rmx_expire = lle->la_expire + lle->lle_remtime / hz + time_second - time_uptime; ndpc.rtm.rtm_flags |= (RTF_HOST | RTF_LLDATA); if (lle->la_flags & LLE_STATIC) ndpc.rtm.rtm_flags |= RTF_STATIC; if (lle->la_flags & LLE_IFADDR) ndpc.rtm.rtm_flags |= RTF_PINNED; if (lle->ln_router != 0) ndpc.rtm.rtm_flags |= RTF_GATEWAY; ndpc.rtm.rtm_rmx.rmx_pksent = lle->la_asked; /* Store state in rmx_weight value */ ndpc.rtm.rtm_rmx.rmx_state = lle->ln_state; ndpc.rtm.rtm_index = ifp->if_index; error = SYSCTL_OUT(wr, &ndpc, sizeof(ndpc)); return (error); } static void in6_lltable_post_resolved(struct lltable *llt, struct llentry *lle) { /* Join the solicited multicast group for dst. */ if ((lle->la_flags & LLE_PUB) == LLE_PUB) in6_join_proxy_ndp_mc(llt->llt_ifp, &lle->r_l3addr.addr6); } static struct lltable * in6_lltattach(struct ifnet *ifp) { struct lltable *llt; llt = lltable_allocate_htbl(IN6_LLTBL_DEFAULT_HSIZE); llt->llt_af = AF_INET6; llt->llt_ifp = ifp; llt->llt_lookup = in6_lltable_lookup; llt->llt_alloc_entry = in6_lltable_alloc; llt->llt_delete_entry = in6_lltable_delete_entry; llt->llt_dump_entry = in6_lltable_dump_entry; llt->llt_hash = in6_lltable_hash; llt->llt_fill_sa_entry = in6_lltable_fill_sa_entry; llt->llt_free_entry = in6_lltable_free_entry; llt->llt_match_prefix = in6_lltable_match_prefix; llt->llt_mark_used = llentry_mark_used; llt->llt_post_resolved = in6_lltable_post_resolved; lltable_link(llt); return (llt); } struct lltable * in6_lltable_get(struct ifnet *ifp) { struct lltable *llt = NULL; void *afdata_ptr = ifp->if_afdata[AF_INET6]; if (afdata_ptr != NULL) llt = ((struct in6_ifextra *)afdata_ptr)->lltable; return (llt); } void * in6_domifattach(struct ifnet *ifp) { struct in6_ifextra *ext; /* There are not IPv6-capable interfaces. 
*/ switch (ifp->if_type) { case IFT_PFLOG: case IFT_PFSYNC: case IFT_USB: return (NULL); } ext = (struct in6_ifextra *)malloc(sizeof(*ext), M_IFADDR, M_WAITOK); bzero(ext, sizeof(*ext)); ext->in6_ifstat = malloc(sizeof(counter_u64_t) * sizeof(struct in6_ifstat) / sizeof(uint64_t), M_IFADDR, M_WAITOK); COUNTER_ARRAY_ALLOC(ext->in6_ifstat, sizeof(struct in6_ifstat) / sizeof(uint64_t), M_WAITOK); ext->icmp6_ifstat = malloc(sizeof(counter_u64_t) * sizeof(struct icmp6_ifstat) / sizeof(uint64_t), M_IFADDR, M_WAITOK); COUNTER_ARRAY_ALLOC(ext->icmp6_ifstat, sizeof(struct icmp6_ifstat) / sizeof(uint64_t), M_WAITOK); ext->nd_ifinfo = nd6_ifattach(ifp); ext->scope6_id = scope6_ifattach(ifp); ext->lltable = in6_lltattach(ifp); ext->mld_ifinfo = mld_domifattach(ifp); return ext; } int in6_domifmtu(struct ifnet *ifp) { if (ifp->if_afdata[AF_INET6] == NULL) return ifp->if_mtu; return (IN6_LINKMTU(ifp)); } void in6_domifdetach(struct ifnet *ifp, void *aux) { struct in6_ifextra *ext = (struct in6_ifextra *)aux; mld_domifdetach(ifp); scope6_ifdetach(ext->scope6_id); nd6_ifdetach(ifp, ext->nd_ifinfo); lltable_free(ext->lltable); COUNTER_ARRAY_FREE(ext->in6_ifstat, sizeof(struct in6_ifstat) / sizeof(uint64_t)); free(ext->in6_ifstat, M_IFADDR); COUNTER_ARRAY_FREE(ext->icmp6_ifstat, sizeof(struct icmp6_ifstat) / sizeof(uint64_t)); free(ext->icmp6_ifstat, M_IFADDR); free(ext, M_IFADDR); } /* * Convert sockaddr_in6 to sockaddr_in. Original sockaddr_in6 must be * v4 mapped addr or v4 compat addr */ void in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6) { bzero(sin, sizeof(*sin)); sin->sin_len = sizeof(struct sockaddr_in); sin->sin_family = AF_INET; sin->sin_port = sin6->sin6_port; sin->sin_addr.s_addr = sin6->sin6_addr.s6_addr32[3]; } /* Convert sockaddr_in to sockaddr_in6 in v4 mapped addr format. */ void in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6) { bzero(sin6, sizeof(*sin6)); sin6->sin6_len = sizeof(struct sockaddr_in6); sin6->sin6_family = AF_INET6; sin6->sin6_port = sin->sin_port; sin6->sin6_addr.s6_addr32[0] = 0; sin6->sin6_addr.s6_addr32[1] = 0; sin6->sin6_addr.s6_addr32[2] = IPV6_ADDR_INT32_SMP; sin6->sin6_addr.s6_addr32[3] = sin->sin_addr.s_addr; } /* Convert sockaddr_in6 into sockaddr_in. */ void in6_sin6_2_sin_in_sock(struct sockaddr *nam) { struct sockaddr_in *sin_p; struct sockaddr_in6 sin6; /* * Save original sockaddr_in6 addr and convert it * to sockaddr_in. */ sin6 = *(struct sockaddr_in6 *)nam; sin_p = (struct sockaddr_in *)nam; in6_sin6_2_sin(sin_p, &sin6); } -/* Convert sockaddr_in into sockaddr_in6 in v4 mapped addr format. */ -void -in6_sin_2_v4mapsin6_in_sock(struct sockaddr **nam) -{ - struct sockaddr_in *sin_p; - struct sockaddr_in6 *sin6_p; - - sin6_p = malloc(sizeof *sin6_p, M_SONAME, M_WAITOK); - sin_p = (struct sockaddr_in *)*nam; - in6_sin_2_v4mapsin6(sin_p, sin6_p); - free(*nam, M_SONAME); - *nam = (struct sockaddr *)sin6_p; -} - /* * Join/leave the solicited multicast groups for proxy NDP entries. */ static void in6_join_proxy_ndp_mc(struct ifnet *ifp, const struct in6_addr *dst) { struct in6_multi *inm; struct in6_addr mltaddr; char ip6buf[INET6_ADDRSTRLEN]; int error; if (in6_solicited_node_maddr(&mltaddr, ifp, dst) != 0) return; /* error logged in in6_solicited_node_maddr. 
*/ error = in6_joingroup(ifp, &mltaddr, NULL, &inm, 0); if (error != 0) { nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s (errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr), if_name(ifp), error)); } } static void in6_leave_proxy_ndp_mc(struct ifnet *ifp, const struct in6_addr *dst) { struct epoch_tracker et; struct in6_multi *inm; struct in6_addr mltaddr; char ip6buf[INET6_ADDRSTRLEN]; if (in6_solicited_node_maddr(&mltaddr, ifp, dst) != 0) return; /* error logged in in6_solicited_node_maddr. */ NET_EPOCH_ENTER(et); inm = in6m_lookup(ifp, &mltaddr); NET_EPOCH_EXIT(et); if (inm != NULL) in6_leavegroup(inm, NULL); else nd6log((LOG_WARNING, "%s: in6m_lookup failed for %s on %s\n", __func__, ip6_sprintf(ip6buf, &mltaddr), if_name(ifp))); } static bool in6_lle_match_pub(struct lltable *llt, struct llentry *lle, void *farg) { return ((lle->la_flags & LLE_PUB) != 0); } void in6_purge_proxy_ndp(struct ifnet *ifp) { struct lltable *llt; bool need_purge; if (ifp->if_afdata[AF_INET6] == NULL) return; llt = LLTABLE6(ifp); IF_AFDATA_WLOCK(ifp); need_purge = ((llt->llt_flags & LLT_ADDEDPROXY) != 0); IF_AFDATA_WUNLOCK(ifp); /* * Ever added proxy ndp entries, leave solicited node multicast * before deleting the llentry. */ if (need_purge) lltable_delete_conditional(llt, in6_lle_match_pub, NULL); } diff --git a/sys/netinet6/in6.h b/sys/netinet6/in6.h index 66dcce8478ab..53a8fad106df 100644 --- a/sys/netinet6/in6.h +++ b/sys/netinet6/in6.h @@ -1,743 +1,742 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $KAME: in6.h,v 1.89 2001/05/27 13:28:35 itojun Exp $ */ /*- * Copyright (c) 1982, 1986, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __KAME_NETINET_IN_H_INCLUDED_ #error "do not include netinet6/in6.h directly, include netinet/in.h. see RFC2553" #endif #ifndef _NETINET6_IN6_H_ #define _NETINET6_IN6_H_ /* * Identification of the network protocol stack * for *BSD-current/release: http://www.kame.net/dev/cvsweb.cgi/kame/COVERAGE * has the table of implementation/integration differences. */ #define __KAME__ #define __KAME_VERSION "FreeBSD" /* * IPv6 port allocation rules should mirror the IPv4 rules and are controlled * by the net.inet.ip.portrange sysctl tree. The following defines exist * for compatibility with userland applications that need them. */ #if __BSD_VISIBLE #define IPV6PORT_RESERVED 1024 #define IPV6PORT_ANONMIN 49152 #define IPV6PORT_ANONMAX 65535 #define IPV6PORT_RESERVEDMIN 600 #define IPV6PORT_RESERVEDMAX (IPV6PORT_RESERVED-1) #endif /* * IPv6 address */ struct in6_addr { union { uint8_t __u6_addr8[16]; uint16_t __u6_addr16[8]; uint32_t __u6_addr32[4]; } __u6_addr; /* 128-bit IP6 address */ }; #define s6_addr __u6_addr.__u6_addr8 #if defined(_KERNEL) || defined(_STANDALONE) /* XXX nonstandard */ #define s6_addr8 __u6_addr.__u6_addr8 #define s6_addr16 __u6_addr.__u6_addr16 #define s6_addr32 __u6_addr.__u6_addr32 #endif #define INET6_ADDRSTRLEN 46 /* * XXX missing POSIX.1-2001 macro IPPROTO_IPV6. 
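 *
 * Added reference sketch for the sockaddr_in6 defined just below
 * (userland, assuming <arpa/inet.h> for inet_pton(); the address
 * literal and port are made up):
 *
 *	struct sockaddr_in6 sin6 = {
 *		.sin6_len = sizeof(sin6),
 *		.sin6_family = AF_INET6,
 *		.sin6_port = htons(8080),
 *	};
 *	inet_pton(AF_INET6, "2001:db8::1", &sin6.sin6_addr);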
*/ /* * Socket address for IPv6 */ #if __BSD_VISIBLE #define SIN6_LEN #endif struct sockaddr_in6 { uint8_t sin6_len; /* length of this struct */ sa_family_t sin6_family; /* AF_INET6 */ in_port_t sin6_port; /* Transport layer port # */ uint32_t sin6_flowinfo; /* IP6 flow information */ struct in6_addr sin6_addr; /* IP6 address */ uint32_t sin6_scope_id; /* scope zone index */ }; /* * Local definition for masks */ #ifdef _KERNEL /* XXX nonstandard */ #define IN6MASK0 {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} #define IN6MASK32 {{{ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} #define IN6MASK64 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} #define IN6MASK96 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }}} #define IN6MASK128 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} #endif #ifdef _KERNEL extern const struct sockaddr_in6 sa6_any; extern const struct in6_addr in6mask0; extern const struct in6_addr in6mask32; extern const struct in6_addr in6mask64; extern const struct in6_addr in6mask96; extern const struct in6_addr in6mask128; #endif /* _KERNEL */ /* * Macros started with IPV6_ADDR is KAME local */ #ifdef _KERNEL /* XXX nonstandard */ #if _BYTE_ORDER == _BIG_ENDIAN #define IPV6_ADDR_INT32_ONE 1 #define IPV6_ADDR_INT32_TWO 2 #define IPV6_ADDR_INT32_MNL 0xff010000 #define IPV6_ADDR_INT32_MLL 0xff020000 #define IPV6_ADDR_INT32_SMP 0x0000ffff #define IPV6_ADDR_INT16_ULL 0xfe80 #define IPV6_ADDR_INT16_USL 0xfec0 #define IPV6_ADDR_INT16_MLL 0xff02 #elif _BYTE_ORDER == _LITTLE_ENDIAN #define IPV6_ADDR_INT32_ONE 0x01000000 #define IPV6_ADDR_INT32_TWO 0x02000000 #define IPV6_ADDR_INT32_MNL 0x000001ff #define IPV6_ADDR_INT32_MLL 0x000002ff #define IPV6_ADDR_INT32_SMP 0xffff0000 #define IPV6_ADDR_INT16_ULL 0x80fe #define IPV6_ADDR_INT16_USL 0xc0fe #define IPV6_ADDR_INT16_MLL 0x02ff #endif #endif /* * Definition of some useful macros to handle IP6 addresses */ #if __BSD_VISIBLE #define IN6ADDR_ANY_INIT \ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} #define IN6ADDR_LOOPBACK_INIT \ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} #define IN6ADDR_NODELOCAL_ALLNODES_INIT \ {{{ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} #define IN6ADDR_INTFACELOCAL_ALLNODES_INIT \ {{{ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} #define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} #define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }}} #define IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT \ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16 }}} #endif extern const struct in6_addr in6addr_any; extern const struct in6_addr in6addr_loopback; #if __BSD_VISIBLE extern const struct in6_addr in6addr_nodelocal_allnodes; extern const struct in6_addr in6addr_linklocal_allnodes; extern const struct in6_addr in6addr_linklocal_allrouters; extern const struct in6_addr in6addr_linklocal_allv2routers; #endif /* * Equality */ #if __BSD_VISIBLE #define 
IN6_ARE_ADDR_EQUAL(a, b) \ (memcmp(&(a)->s6_addr[0], &(b)->s6_addr[0], sizeof(struct in6_addr)) == 0) #endif /* * Unspecified */ #define IN6_IS_ADDR_UNSPECIFIED(a) \ ((a)->__u6_addr.__u6_addr32[0] == 0 && \ (a)->__u6_addr.__u6_addr32[1] == 0 && \ (a)->__u6_addr.__u6_addr32[2] == 0 && \ (a)->__u6_addr.__u6_addr32[3] == 0) /* * Loopback */ #define IN6_IS_ADDR_LOOPBACK(a) \ ((a)->__u6_addr.__u6_addr32[0] == 0 && \ (a)->__u6_addr.__u6_addr32[1] == 0 && \ (a)->__u6_addr.__u6_addr32[2] == 0 && \ (a)->__u6_addr.__u6_addr32[3] == ntohl(1)) /* * IPv4 compatible */ #define IN6_IS_ADDR_V4COMPAT(a) \ ((a)->__u6_addr.__u6_addr32[0] == 0 && \ (a)->__u6_addr.__u6_addr32[1] == 0 && \ (a)->__u6_addr.__u6_addr32[2] == 0 && \ (a)->__u6_addr.__u6_addr32[3] != 0 && \ (a)->__u6_addr.__u6_addr32[3] != ntohl(1)) /* * Mapped */ #define IN6_IS_ADDR_V4MAPPED(a) \ ((a)->__u6_addr.__u6_addr32[0] == 0 && \ (a)->__u6_addr.__u6_addr32[1] == 0 && \ (a)->__u6_addr.__u6_addr32[2] == ntohl(0x0000ffff)) /* * KAME Scope Values */ #ifdef _KERNEL /* XXX nonstandard */ #define IPV6_ADDR_SCOPE_NODELOCAL 0x01 #define IPV6_ADDR_SCOPE_INTFACELOCAL 0x01 #define IPV6_ADDR_SCOPE_LINKLOCAL 0x02 #define IPV6_ADDR_SCOPE_SITELOCAL 0x05 #define IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */ #define IPV6_ADDR_SCOPE_GLOBAL 0x0e #else #define __IPV6_ADDR_SCOPE_NODELOCAL 0x01 #define __IPV6_ADDR_SCOPE_INTFACELOCAL 0x01 #define __IPV6_ADDR_SCOPE_LINKLOCAL 0x02 #define __IPV6_ADDR_SCOPE_SITELOCAL 0x05 #define __IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */ #define __IPV6_ADDR_SCOPE_GLOBAL 0x0e #endif /* * Unicast Scope * Note that we must check topmost 10 bits only, not 16 bits (see RFC2373). */ #define IN6_IS_ADDR_LINKLOCAL(a) \ (((a)->s6_addr[0] == 0xfe) && (((a)->s6_addr[1] & 0xc0) == 0x80)) #define IN6_IS_ADDR_SITELOCAL(a) \ (((a)->s6_addr[0] == 0xfe) && (((a)->s6_addr[1] & 0xc0) == 0xc0)) /* * Multicast */ #define IN6_IS_ADDR_MULTICAST(a) ((a)->s6_addr[0] == 0xff) #ifdef _KERNEL /* XXX nonstandard */ #define IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) #else #define __IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) #endif /* * Multicast Scope */ #ifdef _KERNEL /* refers nonstandard items */ #define IN6_IS_ADDR_MC_NODELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_NODELOCAL)) #define IN6_IS_ADDR_MC_INTFACELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_INTFACELOCAL)) #define IN6_IS_ADDR_MC_LINKLOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_LINKLOCAL)) #define IN6_IS_ADDR_MC_SITELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_SITELOCAL)) #define IN6_IS_ADDR_MC_ORGLOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_ORGLOCAL)) #define IN6_IS_ADDR_MC_GLOBAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_GLOBAL)) #else #define IN6_IS_ADDR_MC_NODELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_NODELOCAL)) #define IN6_IS_ADDR_MC_LINKLOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_LINKLOCAL)) #define IN6_IS_ADDR_MC_SITELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_SITELOCAL)) #define IN6_IS_ADDR_MC_ORGLOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_ORGLOCAL)) #define IN6_IS_ADDR_MC_GLOBAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == 
__IPV6_ADDR_SCOPE_GLOBAL)) #endif #ifdef _KERNEL /* nonstandard */ /* * KAME Scope */ #define IN6_IS_SCOPE_LINKLOCAL(a) \ ((IN6_IS_ADDR_LINKLOCAL(a)) || \ (IN6_IS_ADDR_MC_LINKLOCAL(a))) #define IN6_IS_SCOPE_EMBED(a) \ ((IN6_IS_ADDR_LINKLOCAL(a)) || \ (IN6_IS_ADDR_MC_LINKLOCAL(a)) || \ (IN6_IS_ADDR_MC_INTFACELOCAL(a))) #define IFA6_IS_DEPRECATED(a) \ ((a)->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME && \ (u_int32_t)((time_uptime - (a)->ia6_updatetime)) > \ (a)->ia6_lifetime.ia6t_pltime) #define IFA6_IS_INVALID(a) \ ((a)->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME && \ (u_int32_t)((time_uptime - (a)->ia6_updatetime)) > \ (a)->ia6_lifetime.ia6t_vltime) #endif /* _KERNEL */ /* * IP6 route structure */ #if __BSD_VISIBLE struct nhop_object; struct route_in6 { struct nhop_object *ro_nh; struct llentry *ro_lle; /* * ro_prepend and ro_plen are only used for bpf to pass in a * preformed header. They are not cacheable. */ char *ro_prepend; uint16_t ro_plen; uint16_t ro_flags; uint16_t ro_mtu; /* saved ro_rt mtu */ uint16_t spare; struct sockaddr_in6 ro_dst; }; #endif #ifdef _KERNEL #define MTAG_ABI_IPV6 1444287380 /* IPv6 ABI */ #define IPV6_TAG_DIRECT 0 /* direct-dispatch IPv6 */ #endif /* _KERNEL */ /* * Options for use with [gs]etsockopt at the IPV6 level. * First word of comment is data type; bool is stored in int. */ /* no hdrincl */ #if 0 /* the followings are relic in IPv4 and hence are disabled */ #define IPV6_OPTIONS 1 /* buf/ip6_opts; set/get IP6 options */ #define IPV6_RECVOPTS 5 /* bool; receive all IP6 opts w/dgram */ #define IPV6_RECVRETOPTS 6 /* bool; receive IP6 opts for response */ #define IPV6_RECVDSTADDR 7 /* bool; receive IP6 dst addr w/dgram */ #define IPV6_RETOPTS 8 /* ip6_opts; set/get IP6 options */ #endif #define IPV6_SOCKOPT_RESERVED1 3 /* reserved for future use */ #define IPV6_UNICAST_HOPS 4 /* int; IP6 hops */ #define IPV6_MULTICAST_IF 9 /* u_int; set/get IP6 multicast i/f */ #define IPV6_MULTICAST_HOPS 10 /* int; set/get IP6 multicast hops */ #define IPV6_MULTICAST_LOOP 11 /* u_int; set/get IP6 multicast loopback */ #define IPV6_JOIN_GROUP 12 /* ipv6_mreq; join a group membership */ #define IPV6_LEAVE_GROUP 13 /* ipv6_mreq; leave a group membership */ #define IPV6_PORTRANGE 14 /* int; range to choose for unspec port */ #define ICMP6_FILTER 18 /* icmp6_filter; icmp6 filter */ /* RFC2292 options */ #ifdef _KERNEL #define IPV6_2292PKTINFO 19 /* bool; send/recv if, src/dst addr */ #define IPV6_2292HOPLIMIT 20 /* bool; hop limit */ #define IPV6_2292NEXTHOP 21 /* bool; next hop addr */ #define IPV6_2292HOPOPTS 22 /* bool; hop-by-hop option */ #define IPV6_2292DSTOPTS 23 /* bool; destinaion option */ #define IPV6_2292RTHDR 24 /* bool; routing header */ #define IPV6_2292PKTOPTIONS 25 /* buf/cmsghdr; set/get IPv6 options */ #endif #define IPV6_CHECKSUM 26 /* int; checksum offset for raw socket */ #define IPV6_V6ONLY 27 /* bool; make AF_INET6 sockets v6 only */ #ifndef _KERNEL #define IPV6_BINDV6ONLY IPV6_V6ONLY #endif #define IPV6_IPSEC_POLICY 28 /* struct; get/set security policy */ /* 29; unused; was IPV6_FAITH */ #if 1 /* IPV6FIREWALL */ #define IPV6_FW_ADD 30 /* add a firewall rule to chain */ #define IPV6_FW_DEL 31 /* delete a firewall rule from chain */ #define IPV6_FW_FLUSH 32 /* flush firewall rule chain */ #define IPV6_FW_ZERO 33 /* clear single/all firewall counter(s) */ #define IPV6_FW_GET 34 /* get entire firewall rule chain */ #endif /* new socket options introduced in RFC3542 */ #define IPV6_RTHDRDSTOPTS 35 /* ip6_dest; send dst option before rthdr */ 
#define IPV6_RECVPKTINFO 36 /* bool; recv if, dst addr */ #define IPV6_RECVHOPLIMIT 37 /* bool; recv hop limit */ #define IPV6_RECVRTHDR 38 /* bool; recv routing header */ #define IPV6_RECVHOPOPTS 39 /* bool; recv hop-by-hop option */ #define IPV6_RECVDSTOPTS 40 /* bool; recv dst option after rthdr */ #ifdef _KERNEL #define IPV6_RECVRTHDRDSTOPTS 41 /* bool; recv dst option before rthdr */ #endif #define IPV6_USE_MIN_MTU 42 /* bool; send packets at the minimum MTU */ #define IPV6_RECVPATHMTU 43 /* bool; notify an according MTU */ #define IPV6_PATHMTU 44 /* mtuinfo; get the current path MTU (sopt), 4 bytes int; MTU notification (cmsg) */ #if 0 /*obsoleted during 2292bis -> 3542*/ #define IPV6_REACHCONF 45 /* no data; ND reachability confirm (cmsg only/not in of RFC3542) */ #endif /* more new socket options introduced in RFC3542 */ #define IPV6_PKTINFO 46 /* in6_pktinfo; send if, src addr */ #define IPV6_HOPLIMIT 47 /* int; send hop limit */ #define IPV6_NEXTHOP 48 /* sockaddr; next hop addr */ #define IPV6_HOPOPTS 49 /* ip6_hbh; send hop-by-hop option */ #define IPV6_DSTOPTS 50 /* ip6_dest; send dst option befor rthdr */ #define IPV6_RTHDR 51 /* ip6_rthdr; send routing header */ #if 0 #define IPV6_PKTOPTIONS 52 /* buf/cmsghdr; set/get IPv6 options */ /* obsoleted by RFC3542 */ #endif #define IPV6_RECVTCLASS 57 /* bool; recv traffic class values */ #define IPV6_AUTOFLOWLABEL 59 /* bool; attach flowlabel automagically */ #define IPV6_TCLASS 61 /* int; send traffic class value */ #define IPV6_DONTFRAG 62 /* bool; disable IPv6 fragmentation */ #define IPV6_PREFER_TEMPADDR 63 /* int; prefer temporary addresses as * the source address. */ #define IPV6_BINDANY 64 /* bool: allow bind to any address */ /* unused; was IPV6_BIND_MULTI */ /* unused; was IPV6_RSS_LISTEN_BUCKET */ #define IPV6_FLOWID 67 /* int; flowid of given socket */ #define IPV6_FLOWTYPE 68 /* int; flowtype of given socket */ #define IPV6_RSSBUCKETID 69 /* int; RSS bucket ID of given socket */ #define IPV6_RECVFLOWID 70 /* bool; receive IP6 flowid/flowtype w/ datagram */ #define IPV6_RECVRSSBUCKETID 71 /* bool; receive IP6 RSS bucket id w/ datagram */ #define IPV6_ORIGDSTADDR 72 /* bool: allow getting dstaddr /port info */ #define IPV6_RECVORIGDSTADDR IPV6_ORIGDSTADDR /* * The following option is private; do not use it from user applications. * It is deliberately defined to the same value as IP_MSFILTER. */ #define IPV6_MSFILTER 74 /* struct __msfilterreq; * set/get multicast source filter list. */ /* The following option deals with the 802.1Q Ethernet Priority Code Point */ #define IPV6_VLAN_PCP 75 /* int; set/get PCP used for packet, */ /* -1 use interface default */ /* to define items, should talk with KAME guys first, for *BSD compatibility */ #define IPV6_RTHDR_LOOSE 0 /* this hop need not be a neighbor. XXX old spec */ #define IPV6_RTHDR_STRICT 1 /* this hop must be a neighbor. XXX old spec */ #define IPV6_RTHDR_TYPE_0 0 /* IPv6 routing header type 0 */ /* * Defaults and limits for options */ #define IPV6_DEFAULT_MULTICAST_HOPS 1 /* normally limit m'casts to 1 hop */ #define IPV6_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */ /* * Limit for IPv6 multicast memberships */ #define IPV6_MAX_MEMBERSHIPS 4095 /* * Default resource limits for IPv6 multicast source filtering. * These may be modified by sysctl. */ #define IPV6_MAX_GROUP_SRC_FILTER 512 /* sources per group */ #define IPV6_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */ /* * Argument structure for IPV6_JOIN_GROUP and IPV6_LEAVE_GROUP. 
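 *
 * Typical userland use of the structure defined just below (sketch;
 * the group address, interface name and socket are illustrative,
 * assuming <arpa/inet.h> and <net/if.h>):
 *
 *	struct ipv6_mreq mreq = { 0 };
 *
 *	inet_pton(AF_INET6, "ff02::1:3", &mreq.ipv6mr_multiaddr);
 *	mreq.ipv6mr_interface = if_nametoindex("em0");
 *	(void)setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP,
 *	    &mreq, sizeof(mreq));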
*/ struct ipv6_mreq { struct in6_addr ipv6mr_multiaddr; unsigned int ipv6mr_interface; }; /* * IPV6_PKTINFO: Packet information(RFC2292 sec 5) */ struct in6_pktinfo { struct in6_addr ipi6_addr; /* src/dst IPv6 address */ unsigned int ipi6_ifindex; /* send/recv interface index */ }; /* * Control structure for IPV6_RECVPATHMTU socket option. */ struct ip6_mtuinfo { struct sockaddr_in6 ip6m_addr; /* or sockaddr_storage? */ uint32_t ip6m_mtu; }; /* * Argument for IPV6_PORTRANGE: * - which range to search when port is unspecified at bind() or connect() */ #define IPV6_PORTRANGE_DEFAULT 0 /* default range */ #define IPV6_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */ #define IPV6_PORTRANGE_LOW 2 /* "low" - vouchsafe security */ #if __BSD_VISIBLE /* * Definitions for inet6 sysctl operations. * * Third level is protocol number. * Fourth level is desired variable within that protocol. */ #define IPV6PROTO_MAXID (IPPROTO_PIM + 1) /* don't list to IPV6PROTO_MAX */ /* * Names for IP sysctl objects */ #define IPV6CTL_FORWARDING 1 /* act as router */ #define IPV6CTL_SENDREDIRECTS 2 /* may send redirects when forwarding*/ #define IPV6CTL_DEFHLIM 3 /* default Hop-Limit */ #ifdef notyet #define IPV6CTL_DEFMTU 4 /* default MTU */ #endif #define IPV6CTL_FORWSRCRT 5 /* forward source-routed dgrams */ #define IPV6CTL_STATS 6 /* stats */ #define IPV6CTL_MRTSTATS 7 /* multicast forwarding stats */ #define IPV6CTL_MRTPROTO 8 /* multicast routing protocol */ #define IPV6CTL_MAXFRAGPACKETS 9 /* max packets reassembly queue */ #define IPV6CTL_SOURCECHECK 10 /* verify source route and intf */ #define IPV6CTL_SOURCECHECK_LOGINT 11 /* minimume logging interval */ #define IPV6CTL_ACCEPT_RTADV 12 /* 13; unused; was: IPV6CTL_KEEPFAITH */ #define IPV6CTL_LOG_INTERVAL 14 #define IPV6CTL_HDRNESTLIMIT 15 #define IPV6CTL_DAD_COUNT 16 #define IPV6CTL_AUTO_FLOWLABEL 17 #define IPV6CTL_DEFMCASTHLIM 18 #define IPV6CTL_GIF_HLIM 19 /* default HLIM for gif encap packet */ #define IPV6CTL_KAME_VERSION 20 #define IPV6CTL_USE_DEPRECATED 21 /* use deprecated addr (RFC2462 5.5.4) */ #define IPV6CTL_RR_PRUNE 22 /* walk timer for router renumbering */ #if 0 /* obsolete */ #define IPV6CTL_MAPPED_ADDR 23 #endif #define IPV6CTL_V6ONLY 24 /* IPV6CTL_RTEXPIRE 25 deprecated */ /* IPV6CTL_RTMINEXPIRE 26 deprecated */ /* IPV6CTL_RTMAXCACHE 27 deprecated */ #define IPV6CTL_USETEMPADDR 32 /* use temporary addresses (RFC3041) */ #define IPV6CTL_TEMPPLTIME 33 /* preferred lifetime for tmpaddrs */ #define IPV6CTL_TEMPVLTIME 34 /* valid lifetime for tmpaddrs */ #define IPV6CTL_AUTO_LINKLOCAL 35 /* automatic link-local addr assign */ #define IPV6CTL_RIP6STATS 36 /* raw_ip6 stats */ #define IPV6CTL_PREFER_TEMPADDR 37 /* prefer temporary addr as src */ #define IPV6CTL_ADDRCTLPOLICY 38 /* get/set address selection policy */ #define IPV6CTL_USE_DEFAULTZONE 39 /* use default scope zone */ #define IPV6CTL_MAXFRAGS 41 /* max fragments */ #if 0 #define IPV6CTL_IFQ 42 /* ip6intrq node */ #define IPV6CTL_ISATAPRTR 43 /* isatap router */ #endif #define IPV6CTL_MCAST_PMTU 44 /* enable pMTU discovery for multicast? */ /* New entries should be added here from current IPV6CTL_MAXID value. */ /* to define items, should talk with KAME guys first, for *BSD compatibility */ #define IPV6CTL_STEALTH 45 #define ICMPV6CTL_ND6_ONLINKNSRFC4861 47 #define IPV6CTL_NO_RADR 48 /* No defroute from RA */ #define IPV6CTL_NORBIT_RAIF 49 /* Disable R-bit in NA on RA * receiving IF. 
*/ #define IPV6CTL_RFC6204W3 50 /* Accept defroute even when forwarding enabled */ #define IPV6CTL_INTRQMAXLEN 51 /* max length of IPv6 netisr queue */ #define IPV6CTL_INTRDQMAXLEN 52 /* max length of direct IPv6 netisr * queue */ #define IPV6CTL_MAXFRAGSPERPACKET 53 /* Max fragments per packet */ #define IPV6CTL_MAXFRAGBUCKETSIZE 54 /* Max reassembly queues per bucket */ #define IPV6CTL_MAXID 55 #endif /* __BSD_VISIBLE */ /* * Since both netinet/ and netinet6/ call into netipsec/ and netpfil/, * the protocol specific mbuf flags are shared between them. */ #define M_FASTFWD_OURS M_PROTO1 /* changed dst to local */ #define M_IP6_NEXTHOP M_PROTO2 /* explicit ip nexthop */ #define M_IP_NEXTHOP M_PROTO2 /* explicit ip nexthop */ #define M_SKIP_FIREWALL M_PROTO3 /* skip firewall processing */ #define M_AUTHIPHDR M_PROTO4 #define M_DECRYPTED M_PROTO5 #define M_LOOP M_PROTO6 #define M_AUTHIPDGM M_PROTO7 #define M_RTALERT_MLD M_PROTO8 #define M_FRAGMENTED M_PROTO9 /* contained fragment header */ #ifdef _KERNEL struct cmsghdr; struct ip6_hdr; int in6_cksum(struct mbuf *, uint8_t, uint32_t, uint32_t); int in6_cksum_partial(struct mbuf *, uint8_t, uint32_t, uint32_t, uint32_t); int in6_cksum_pseudo(struct ip6_hdr *, uint32_t, uint8_t, uint16_t); int in6_localaddr(struct in6_addr *); int in6_localip(struct in6_addr *); bool in6_localip_fib(struct in6_addr *, uint16_t); int in6_ifhasaddr(struct ifnet *, struct in6_addr *); int in6_addrscope(const struct in6_addr *); char *ip6_sprintf(char *, const struct in6_addr *); struct in6_ifaddr *in6_ifawithifp(struct ifnet *, struct in6_addr *); extern void in6_if_up(struct ifnet *); struct sockaddr; void in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6); void in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6); void in6_sin6_2_sin_in_sock(struct sockaddr *nam); -void in6_sin_2_v4mapsin6_in_sock(struct sockaddr **nam); extern void addrsel_policy_init(void); #define satosin6(sa) ((struct sockaddr_in6 *)(sa)) #define sin6tosa(sin6) ((struct sockaddr *)(sin6)) #define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa)) #endif /* _KERNEL */ #ifndef _SIZE_T_DECLARED typedef __size_t size_t; #define _SIZE_T_DECLARED #endif #ifndef _SOCKLEN_T_DECLARED typedef __socklen_t socklen_t; #define _SOCKLEN_T_DECLARED #endif #if __BSD_VISIBLE __BEGIN_DECLS struct cmsghdr; extern int inet6_option_space(int); extern int inet6_option_init(void *, struct cmsghdr **, int); extern int inet6_option_append(struct cmsghdr *, const uint8_t *, int, int); extern uint8_t *inet6_option_alloc(struct cmsghdr *, int, int, int); extern int inet6_option_next(const struct cmsghdr *, uint8_t **); extern int inet6_option_find(const struct cmsghdr *, uint8_t **, int); extern size_t inet6_rthdr_space(int, int); extern struct cmsghdr *inet6_rthdr_init(void *, int); extern int inet6_rthdr_add(struct cmsghdr *, const struct in6_addr *, unsigned int); extern int inet6_rthdr_lasthop(struct cmsghdr *, unsigned int); #if 0 /* not implemented yet */ extern int inet6_rthdr_reverse(const struct cmsghdr *, struct cmsghdr *); #endif extern int inet6_rthdr_segments(const struct cmsghdr *); extern struct in6_addr *inet6_rthdr_getaddr(struct cmsghdr *, int); extern int inet6_rthdr_getflags(const struct cmsghdr *, int); extern int inet6_opt_init(void *, socklen_t); extern int inet6_opt_append(void *, socklen_t, int, uint8_t, socklen_t, uint8_t, void **); extern int inet6_opt_finish(void *, socklen_t, int); extern int inet6_opt_set_val(void *, int, void *, socklen_t); extern int 
inet6_opt_next(void *, socklen_t, int, uint8_t *, socklen_t *, void **); extern int inet6_opt_find(void *, socklen_t, int, uint8_t, socklen_t *, void **); extern int inet6_opt_get_val(void *, int, void *, socklen_t); extern socklen_t inet6_rth_space(int, int); extern void *inet6_rth_init(void *, socklen_t, int, int); extern int inet6_rth_add(void *, const struct in6_addr *); extern int inet6_rth_reverse(const void *, void *); extern int inet6_rth_segments(const void *); extern struct in6_addr *inet6_rth_getaddr(const void *, int); __END_DECLS #endif /* __BSD_VISIBLE */ #endif /* !_NETINET6_IN6_H_ */ diff --git a/sys/netinet6/in6_pcb.c b/sys/netinet6/in6_pcb.c index d44e5c2cddb5..d18a7283ba05 100644 --- a/sys/netinet6/in6_pcb.c +++ b/sys/netinet6/in6_pcb.c @@ -1,1244 +1,1234 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * Copyright (c) 2010-2011 Juniper Networks, Inc. * All rights reserved. * * Portions of this software were developed by Robert N. M. Watson under * contract to Juniper Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $KAME: in6_pcb.c,v 1.31 2001/05/21 05:45:10 jinmei Exp $ */ /*- * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include "opt_route.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct ucred *cred) { struct socket *so = inp->inp_socket; u_int16_t lport = 0; int error, lookupflags = 0; #ifdef INVARIANTS struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; #endif INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(pcbinfo); error = prison_local_ip6(cred, laddr, ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0)); if (error) return(error); /* XXX: this is redundant when called from in6_pcbbind */ if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT|SO_REUSEPORT_LB)) == 0) lookupflags = INPLOOKUP_WILDCARD; inp->inp_flags |= INP_ANONPORT; error = in_pcb_lport(inp, NULL, &lport, cred, lookupflags); if (error != 0) return (error); inp->inp_lport = lport; if (in_pcbinshash(inp) != 0) { inp->in6p_laddr = in6addr_any; inp->inp_lport = 0; return (EAGAIN); } return (0); } int in6_pcbbind(struct inpcb *inp, struct sockaddr_in6 *sin6, struct ucred *cred) { struct socket *so = inp->inp_socket; struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; u_short lport = 0; int error, lookupflags = 0; int reuseport = (so->so_options & SO_REUSEPORT); /* * XXX: Maybe we could let SO_REUSEPORT_LB set SO_REUSEPORT bit here * so that we don't have to add to the (already messy) code below. 
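 *
 * For reference (illustrative, not part of this function): userland
 * requests the load-balancing group before bind(2) with
 *
 *	int one = 1;
 *
 *	(void)setsockopt(s, SOL_SOCKET, SO_REUSEPORT_LB, &one,
 *	    sizeof(one));
 *
 * which is why both option bits have to be honoured here.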
*/ int reuseport_lb = (so->so_options & SO_REUSEPORT_LB); INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(pcbinfo); if (inp->inp_lport || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) return (EINVAL); if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT|SO_REUSEPORT_LB)) == 0) lookupflags = INPLOOKUP_WILDCARD; if (sin6 == NULL) { if ((error = prison_local_ip6(cred, &inp->in6p_laddr, ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0))) != 0) return (error); } else { KASSERT(sin6->sin6_family == AF_INET6, ("%s: invalid address family for %p", __func__, sin6)); KASSERT(sin6->sin6_len == sizeof(*sin6), ("%s: invalid address length for %p", __func__, sin6)); if ((error = sa6_embedscope(sin6, V_ip6_use_defzone)) != 0) return(error); if ((error = prison_local_ip6(cred, &sin6->sin6_addr, ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0))) != 0) return (error); lport = sin6->sin6_port; if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { /* * Treat SO_REUSEADDR as SO_REUSEPORT for multicast; * allow complete duplication of binding if * SO_REUSEPORT is set, or if SO_REUSEADDR is set * and a multicast address is bound on both * new and duplicated sockets. */ if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) != 0) reuseport = SO_REUSEADDR|SO_REUSEPORT; /* * XXX: How to deal with SO_REUSEPORT_LB here? * Treat same as SO_REUSEPORT for now. */ if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT_LB)) != 0) reuseport_lb = SO_REUSEADDR|SO_REUSEPORT_LB; } else if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { struct epoch_tracker et; struct ifaddr *ifa; sin6->sin6_port = 0; /* yech... */ NET_EPOCH_ENTER(et); if ((ifa = ifa_ifwithaddr((struct sockaddr *)sin6)) == NULL && (inp->inp_flags & INP_BINDANY) == 0) { NET_EPOCH_EXIT(et); return (EADDRNOTAVAIL); } /* * XXX: bind to an anycast address might accidentally * cause sending a packet with anycast source address. * We should allow binding to a deprecated address, since * the application dares to use it.
*/ if (ifa != NULL && ((struct in6_ifaddr *)ifa)->ia6_flags & (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|IN6_IFF_DETACHED)) { NET_EPOCH_EXIT(et); return (EADDRNOTAVAIL); } NET_EPOCH_EXIT(et); } if (lport) { struct inpcb *t; /* GROSS */ if (ntohs(lport) <= V_ipport_reservedhigh && ntohs(lport) >= V_ipport_reservedlow && priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT)) return (EACCES); if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr) && priv_check_cred(inp->inp_cred, PRIV_NETINET_REUSEPORT) != 0) { t = in6_pcblookup_local(pcbinfo, &sin6->sin6_addr, lport, INPLOOKUP_WILDCARD, cred); if (t != NULL && (so->so_type != SOCK_STREAM || IN6_IS_ADDR_UNSPECIFIED(&t->in6p_faddr)) && (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || !IN6_IS_ADDR_UNSPECIFIED(&t->in6p_laddr) || (t->inp_socket->so_options & SO_REUSEPORT) || (t->inp_socket->so_options & SO_REUSEPORT_LB) == 0) && (inp->inp_cred->cr_uid != t->inp_cred->cr_uid)) return (EADDRINUSE); #ifdef INET if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 && IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { struct sockaddr_in sin; in6_sin6_2_sin(&sin, sin6); t = in_pcblookup_local(pcbinfo, sin.sin_addr, lport, INPLOOKUP_WILDCARD, cred); if (t != NULL && (so->so_type != SOCK_STREAM || ntohl(t->inp_faddr.s_addr) == INADDR_ANY) && (inp->inp_cred->cr_uid != t->inp_cred->cr_uid)) return (EADDRINUSE); } #endif } t = in6_pcblookup_local(pcbinfo, &sin6->sin6_addr, lport, lookupflags, cred); if (t && (reuseport & t->inp_socket->so_options) == 0 && (reuseport_lb & t->inp_socket->so_options) == 0) { return (EADDRINUSE); } #ifdef INET if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 && IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { struct sockaddr_in sin; in6_sin6_2_sin(&sin, sin6); t = in_pcblookup_local(pcbinfo, sin.sin_addr, lport, lookupflags, cred); if (t && (reuseport & t->inp_socket->so_options) == 0 && (reuseport_lb & t->inp_socket->so_options) == 0 && (ntohl(t->inp_laddr.s_addr) != INADDR_ANY || (t->inp_vflag & INP_IPV6PROTO) != 0)) { return (EADDRINUSE); } } #endif } inp->in6p_laddr = sin6->sin6_addr; } if (lport == 0) { if ((error = in6_pcbsetport(&inp->in6p_laddr, inp, cred)) != 0) { /* Undo an address bind that may have occurred. */ inp->in6p_laddr = in6addr_any; return (error); } } else { inp->inp_lport = lport; if (in_pcbinshash(inp) != 0) { inp->in6p_laddr = in6addr_any; inp->inp_lport = 0; return (EAGAIN); } } return (0); } /* * Transform old in6_pcbconnect() into an inner subroutine for new * in6_pcbconnect(): Do some validity-checking on the remote * address (in mbuf 'nam') and then determine local host address * (i.e., which interface) to use to access that remote host. * * This preserves definition of in6_pcbconnect(), while supporting a * slightly different version for T/TCP. (This is more than * a bit of a kludge, but cleaning up the internal interfaces would * have forced minor changes in every protocol). */ static int in6_pcbladdr(struct inpcb *inp, struct sockaddr_in6 *sin6, struct in6_addr *plocal_addr6, bool sas_required) { int error = 0; int scope_ambiguous = 0; struct in6_addr in6a; NET_EPOCH_ASSERT(); INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); /* XXXRW: why? */ if (sin6->sin6_port == 0) return (EADDRNOTAVAIL); if (sin6->sin6_scope_id == 0 && !V_ip6_use_defzone) scope_ambiguous = 1; if ((error = sa6_embedscope(sin6, V_ip6_use_defzone)) != 0) return(error); if (!CK_STAILQ_EMPTY(&V_in6_ifaddrhead)) { /* * If the destination address is UNSPECIFIED addr, * use the loopback addr, e.g ::1. 
*/ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) sin6->sin6_addr = in6addr_loopback; } if ((error = prison_remote_ip6(inp->inp_cred, &sin6->sin6_addr)) != 0) return (error); if (sas_required) { error = in6_selectsrc_socket(sin6, inp->in6p_outputopts, inp, inp->inp_cred, scope_ambiguous, &in6a, NULL); if (error) return (error); } else { /* * Source address selection isn't required when syncache * has already established the connection and both source and * destination addresses were chosen. * * This also includes the case when fwd_tag was used to * select source address in tcp_input(). */ in6a = inp->in6p_laddr; } if (IN6_IS_ADDR_UNSPECIFIED(&in6a)) return (EHOSTUNREACH); /* * Do not update this earlier, in case we return with an error. * * XXX: this in6_selectsrc_socket result might replace the bound local * address with the address specified by setsockopt(IPV6_PKTINFO). * Is it the intended behavior? */ *plocal_addr6 = in6a; /* * Don't do pcblookup call here; return interface in * plocal_addr6 * and exit to caller, that will do the lookup. */ return (0); } /* * Outer subroutine: * Connect from a socket to a specified address. * Both address and port must be specified in argument sin. * If we don't have a local address for this socket yet, * then pick one. */ int in6_pcbconnect(struct inpcb *inp, struct sockaddr_in6 *sin6, struct ucred *cred, bool sas_required) { struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; struct sockaddr_in6 laddr6; int error; NET_EPOCH_ASSERT(); INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(pcbinfo); KASSERT(sin6->sin6_family == AF_INET6, ("%s: invalid address family for %p", __func__, sin6)); KASSERT(sin6->sin6_len == sizeof(*sin6), ("%s: invalid address length for %p", __func__, sin6)); KASSERT(IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr), ("%s: inp is already connected", __func__)); bzero(&laddr6, sizeof(laddr6)); laddr6.sin6_family = AF_INET6; #ifdef ROUTE_MPATH if (CALC_FLOWID_OUTBOUND) { uint32_t hash_type, hash_val; hash_val = fib6_calc_software_hash(&inp->in6p_laddr, &sin6->sin6_addr, 0, sin6->sin6_port, inp->inp_socket->so_proto->pr_protocol, &hash_type); inp->inp_flowid = hash_val; inp->inp_flowtype = hash_type; } #endif /* * Call inner routine, to assign local interface address. * in6_pcbladdr() may automatically fill in sin6_scope_id. */ if ((error = in6_pcbladdr(inp, sin6, &laddr6.sin6_addr, sas_required)) != 0) return (error); if (in6_pcblookup_hash_locked(pcbinfo, &sin6->sin6_addr, sin6->sin6_port, IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ?
&laddr6.sin6_addr : &inp->in6p_laddr, inp->inp_lport, 0, M_NODOM) != NULL) return (EADDRINUSE); if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { if (inp->inp_lport == 0) { error = in_pcb_lport_dest(inp, (struct sockaddr *) &laddr6, &inp->inp_lport, (struct sockaddr *) sin6, sin6->sin6_port, cred, INPLOOKUP_WILDCARD); if (error) return (error); } inp->in6p_laddr = laddr6.sin6_addr; } inp->in6p_faddr = sin6->sin6_addr; inp->inp_fport = sin6->sin6_port; /* update flowinfo - draft-itojun-ipv6-flowlabel-api-00 */ inp->inp_flow &= ~IPV6_FLOWLABEL_MASK; if (inp->inp_flags & IN6P_AUTOFLOWLABEL) inp->inp_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); if ((inp->inp_flags & INP_INHASHLIST) != 0) { in_pcbrehash(inp); } else { in_pcbinshash(inp); } return (0); } void in6_pcbdisconnect(struct inpcb *inp) { INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); KASSERT(inp->inp_smr == SMR_SEQ_INVALID, ("%s: inp %p was already disconnected", __func__, inp)); in_pcbremhash_locked(inp); /* See the comment in in_pcbinshash(). */ inp->inp_smr = smr_advance(inp->inp_pcbinfo->ipi_smr); /* XXX-MJ torn writes are visible to SMR lookup */ memset(&inp->in6p_laddr, 0, sizeof(inp->in6p_laddr)); memset(&inp->in6p_faddr, 0, sizeof(inp->in6p_faddr)); inp->inp_fport = 0; /* clear flowinfo - draft-itojun-ipv6-flowlabel-api-00 */ inp->inp_flow &= ~IPV6_FLOWLABEL_MASK; } -struct sockaddr * -in6_sockaddr(in_port_t port, struct in6_addr *addr_p) -{ - struct sockaddr_in6 *sin6; - - sin6 = malloc(sizeof *sin6, M_SONAME, M_WAITOK); - bzero(sin6, sizeof *sin6); - sin6->sin6_family = AF_INET6; - sin6->sin6_len = sizeof(*sin6); - sin6->sin6_port = port; - sin6->sin6_addr = *addr_p; - (void)sa6_recoverscope(sin6); /* XXX: should catch errors */ - - return (struct sockaddr *)sin6; -} - int -in6_getsockaddr(struct socket *so, struct sockaddr **nam) +in6_getsockaddr(struct socket *so, struct sockaddr *sa) { struct inpcb *inp; - struct in6_addr addr; - in_port_t port; inp = sotoinpcb(so); KASSERT(inp != NULL, ("in6_getsockaddr: inp == NULL")); - INP_RLOCK(inp); - port = inp->inp_lport; - addr = inp->in6p_laddr; - INP_RUNLOCK(inp); + *(struct sockaddr_in6 *)sa = (struct sockaddr_in6 ){ + .sin6_len = sizeof(struct sockaddr_in6), + .sin6_family = AF_INET6, + .sin6_port = inp->inp_lport, + .sin6_addr = inp->in6p_laddr, + }; + /* XXX: should catch errors */ + (void)sa6_recoverscope((struct sockaddr_in6 *)sa); - *nam = in6_sockaddr(port, &addr); - return 0; + return (0); } int -in6_getpeeraddr(struct socket *so, struct sockaddr **nam) +in6_getpeeraddr(struct socket *so, struct sockaddr *sa) { struct inpcb *inp; - struct in6_addr addr; - in_port_t port; inp = sotoinpcb(so); KASSERT(inp != NULL, ("in6_getpeeraddr: inp == NULL")); - INP_RLOCK(inp); - port = inp->inp_fport; - addr = inp->in6p_faddr; - INP_RUNLOCK(inp); + *(struct sockaddr_in6 *)sa = (struct sockaddr_in6 ){ + .sin6_len = sizeof(struct sockaddr_in6), + .sin6_family = AF_INET6, + .sin6_port = inp->inp_fport, + .sin6_addr = inp->in6p_faddr, + }; + /* XXX: should catch errors */ + (void)sa6_recoverscope((struct sockaddr_in6 *)sa); - *nam = in6_sockaddr(port, &addr); - return 0; + return (0); } int -in6_mapped_sockaddr(struct socket *so, struct sockaddr **nam) +in6_mapped_sockaddr(struct socket *so, struct sockaddr *sa) { struct inpcb *inp; int error; inp = sotoinpcb(so); KASSERT(inp != NULL, ("in6_mapped_sockaddr: inp == NULL")); #ifdef INET if ((inp->inp_vflag & (INP_IPV4 | INP_IPV6)) == INP_IPV4) { - error = in_getsockaddr(so, nam); + struct sockaddr_in sin; 
+ + error = in_getsockaddr(so, (struct sockaddr *)&sin); if (error == 0) - in6_sin_2_v4mapsin6_in_sock(nam); + in6_sin_2_v4mapsin6(&sin, (struct sockaddr_in6 *)sa); } else #endif { /* scope issues will be handled in in6_getsockaddr(). */ - error = in6_getsockaddr(so, nam); + error = in6_getsockaddr(so, sa); } return error; } int -in6_mapped_peeraddr(struct socket *so, struct sockaddr **nam) +in6_mapped_peeraddr(struct socket *so, struct sockaddr *sa) { struct inpcb *inp; int error; inp = sotoinpcb(so); KASSERT(inp != NULL, ("in6_mapped_peeraddr: inp == NULL")); #ifdef INET if ((inp->inp_vflag & (INP_IPV4 | INP_IPV6)) == INP_IPV4) { - error = in_getpeeraddr(so, nam); + struct sockaddr_in sin; + + error = in_getpeeraddr(so, (struct sockaddr *)&sin); if (error == 0) - in6_sin_2_v4mapsin6_in_sock(nam); + in6_sin_2_v4mapsin6(&sin, (struct sockaddr_in6 *)sa); } else #endif /* scope issues will be handled in in6_getpeeraddr(). */ - error = in6_getpeeraddr(so, nam); + error = in6_getpeeraddr(so, sa); return error; } /* * Pass some notification to all connections of a protocol * associated with address dst. The local address and/or port numbers * may be specified to limit the search. The "usual action" will be * taken, depending on the ctlinput cmd. The caller must filter any * cmds that are uninteresting (e.g., no error in the map). * Call the protocol specific routine (if any) to report * any errors for each matching socket. */ static bool inp_match6(const struct inpcb *inp, void *v __unused) { return ((inp->inp_vflag & INP_IPV6) != 0); } void in6_pcbnotify(struct inpcbinfo *pcbinfo, struct sockaddr_in6 *sa6_dst, u_int fport_arg, const struct sockaddr_in6 *src, u_int lport_arg, int errno, void *cmdarg, struct inpcb *(*notify)(struct inpcb *, int)) { struct inpcb_iterator inpi = INP_ITERATOR(pcbinfo, INPLOOKUP_WLOCKPCB, inp_match6, NULL); struct inpcb *inp; struct sockaddr_in6 sa6_src; u_short fport = fport_arg, lport = lport_arg; u_int32_t flowinfo; if (IN6_IS_ADDR_UNSPECIFIED(&sa6_dst->sin6_addr)) return; /* * note that src can be NULL when we get notify by local fragmentation. */ sa6_src = (src == NULL) ? sa6_any : *src; flowinfo = sa6_src.sin6_flowinfo; while ((inp = inp_next(&inpi)) != NULL) { INP_WLOCK_ASSERT(inp); /* * If the error designates a new path MTU for a destination * and the application (associated with this socket) wanted to * know the value, notify. * XXX: should we avoid to notify the value to TCP sockets? */ if (errno == EMSGSIZE && cmdarg != NULL) ip6_notify_pmtu(inp, sa6_dst, *(uint32_t *)cmdarg); /* * Detect if we should notify the error. If no source and * destination ports are specified, but non-zero flowinfo and * local address match, notify the error. This is the case * when the error is delivered with an encrypted buffer * by ESP. Otherwise, just compare addresses and ports * as usual. */ if (lport == 0 && fport == 0 && flowinfo && inp->inp_socket != NULL && flowinfo == (inp->inp_flow & IPV6_FLOWLABEL_MASK) && IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, &sa6_src.sin6_addr)) goto do_notify; else if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, &sa6_dst->sin6_addr) || inp->inp_socket == 0 || (lport && inp->inp_lport != lport) || (!IN6_IS_ADDR_UNSPECIFIED(&sa6_src.sin6_addr) && !IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, &sa6_src.sin6_addr)) || (fport && inp->inp_fport != fport)) { continue; } do_notify: if (notify) (*notify)(inp, errno); } } /* * Lookup a PCB based on the local address and port. Caller must hold the * hash lock. No inpcb locks or references are acquired. 
*/ struct inpcb * in6_pcblookup_local(struct inpcbinfo *pcbinfo, struct in6_addr *laddr, u_short lport, int lookupflags, struct ucred *cred) { struct inpcb *inp; int matchwild = 3, wildcard; KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0, ("%s: invalid lookup flags %d", __func__, lookupflags)); INP_HASH_LOCK_ASSERT(pcbinfo); if ((lookupflags & INPLOOKUP_WILDCARD) == 0) { struct inpcbhead *head; /* * Look for an unconnected (wildcard foreign addr) PCB that * matches the local address and port we're looking for. */ head = &pcbinfo->ipi_hash_wild[INP_PCBHASH_WILD(lport, pcbinfo->ipi_hashmask)]; CK_LIST_FOREACH(inp, head, inp_hash_wild) { /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV6) == 0) continue; if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) && IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) && inp->inp_lport == lport) { /* Found. */ if (prison_equal_ip6(cred->cr_prison, inp->inp_cred->cr_prison)) return (inp); } } /* * Not found. */ return (NULL); } else { struct inpcbporthead *porthash; struct inpcbport *phd; struct inpcb *match = NULL; /* * Best fit PCB lookup. * * First see if this local port is in use by looking on the * port hash list. */ porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport, pcbinfo->ipi_porthashmask)]; CK_LIST_FOREACH(phd, porthash, phd_hash) { if (phd->phd_port == lport) break; } if (phd != NULL) { /* * Port is in use by one or more PCBs. Look for best * fit. */ CK_LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) { wildcard = 0; if (!prison_equal_ip6(cred->cr_prison, inp->inp_cred->cr_prison)) continue; /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV6) == 0) continue; if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) wildcard++; if (!IN6_IS_ADDR_UNSPECIFIED( &inp->in6p_laddr)) { if (IN6_IS_ADDR_UNSPECIFIED(laddr)) wildcard++; else if (!IN6_ARE_ADDR_EQUAL( &inp->in6p_laddr, laddr)) continue; } else { if (!IN6_IS_ADDR_UNSPECIFIED(laddr)) wildcard++; } if (wildcard < matchwild) { match = inp; matchwild = wildcard; if (matchwild == 0) break; } } } return (match); } } static bool in6_multi_match(const struct inpcb *inp, void *v __unused) { if ((inp->inp_vflag & INP_IPV6) && inp->in6p_moptions != NULL) return (true); else return (false); } void in6_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp) { struct inpcb_iterator inpi = INP_ITERATOR(pcbinfo, INPLOOKUP_RLOCKPCB, in6_multi_match, NULL); struct inpcb *inp; struct in6_multi *inm; struct in6_mfilter *imf; struct ip6_moptions *im6o; IN6_MULTI_LOCK_ASSERT(); while ((inp = inp_next(&inpi)) != NULL) { INP_RLOCK_ASSERT(inp); im6o = inp->in6p_moptions; /* * Unselect the outgoing ifp for multicast if it * is being detached. */ if (im6o->im6o_multicast_ifp == ifp) im6o->im6o_multicast_ifp = NULL; /* * Drop multicast group membership if we joined * through the interface being detached. */ restart: IP6_MFILTER_FOREACH(imf, &im6o->im6o_head) { if ((inm = imf->im6f_in6m) == NULL) continue; if (inm->in6m_ifp != ifp) continue; ip6_mfilter_remove(&im6o->im6o_head, imf); in6_leavegroup_locked(inm, NULL); ip6_mfilter_free(imf); goto restart; } } } /* * Check for alternatives when higher level complains * about service problems. For now, invalidate cached * routing information. If the route was created dynamically * (by a redirect), time to try a default gateway again. */ void in6_losing(struct inpcb *inp) { RO_INVALIDATE_CACHE(&inp->inp_route6); } /* * After a routing change, flush old routing * and allocate a (hopefully) better one. 
*/ struct inpcb * in6_rtchange(struct inpcb *inp, int errno __unused) { RO_INVALIDATE_CACHE(&inp->inp_route6); return inp; } static bool in6_pcblookup_lb_numa_match(const struct inpcblbgroup *grp, int domain) { return (domain == M_NODOM || domain == grp->il_numa_domain); } static struct inpcb * in6_pcblookup_lbgroup(const struct inpcbinfo *pcbinfo, const struct in6_addr *faddr, uint16_t fport, const struct in6_addr *laddr, uint16_t lport, uint8_t domain) { const struct inpcblbgrouphead *hdr; struct inpcblbgroup *grp; struct inpcblbgroup *jail_exact, *jail_wild, *local_exact, *local_wild; INP_HASH_LOCK_ASSERT(pcbinfo); hdr = &pcbinfo->ipi_lbgrouphashbase[ INP_PCBPORTHASH(lport, pcbinfo->ipi_lbgrouphashmask)]; /* * Search for an LB group match based on the following criteria: * - prefer jailed groups to non-jailed groups * - prefer exact source address matches to wildcard matches * - prefer groups bound to the specified NUMA domain */ jail_exact = jail_wild = local_exact = local_wild = NULL; CK_LIST_FOREACH(grp, hdr, il_list) { bool injail; #ifdef INET if (!(grp->il_vflag & INP_IPV6)) continue; #endif if (grp->il_lport != lport) continue; injail = prison_flag(grp->il_cred, PR_IP6) != 0; if (injail && prison_check_ip6_locked(grp->il_cred->cr_prison, laddr) != 0) continue; if (IN6_ARE_ADDR_EQUAL(&grp->il6_laddr, laddr)) { if (injail) { jail_exact = grp; if (in6_pcblookup_lb_numa_match(grp, domain)) /* This is a perfect match. */ goto out; } else if (local_exact == NULL || in6_pcblookup_lb_numa_match(grp, domain)) { local_exact = grp; } } else if (IN6_IS_ADDR_UNSPECIFIED(&grp->il6_laddr)) { if (injail) { if (jail_wild == NULL || in6_pcblookup_lb_numa_match(grp, domain)) jail_wild = grp; } else if (local_wild == NULL || in6_pcblookup_lb_numa_match(grp, domain)) { local_wild = grp; } } } if (jail_exact != NULL) grp = jail_exact; else if (jail_wild != NULL) grp = jail_wild; else if (local_exact != NULL) grp = local_exact; else grp = local_wild; if (grp == NULL) return (NULL); out: return (grp->il_inp[INP6_PCBLBGROUP_PKTHASH(faddr, lport, fport) % grp->il_inpcnt]); } static bool in6_pcblookup_exact_match(const struct inpcb *inp, const struct in6_addr *faddr, u_short fport, const struct in6_addr *laddr, u_short lport) { /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV6) == 0) return (false); if (IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, faddr) && IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) && inp->inp_fport == fport && inp->inp_lport == lport) return (true); return (false); } static struct inpcb * in6_pcblookup_hash_exact(struct inpcbinfo *pcbinfo, const struct in6_addr *faddr, u_short fport, const struct in6_addr *laddr, u_short lport) { struct inpcbhead *head; struct inpcb *inp; INP_HASH_LOCK_ASSERT(pcbinfo); /* * First look for an exact match. 
*/ head = &pcbinfo->ipi_hash_exact[INP6_PCBHASH(faddr, lport, fport, pcbinfo->ipi_hashmask)]; CK_LIST_FOREACH(inp, head, inp_hash_exact) { if (in6_pcblookup_exact_match(inp, faddr, fport, laddr, lport)) return (inp); } return (NULL); } typedef enum { INPLOOKUP_MATCH_NONE = 0, INPLOOKUP_MATCH_WILD = 1, INPLOOKUP_MATCH_LADDR = 2, } inp_lookup_match_t; static inp_lookup_match_t in6_pcblookup_wild_match(const struct inpcb *inp, const struct in6_addr *laddr, u_short lport) { /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV6) == 0) return (INPLOOKUP_MATCH_NONE); if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) || inp->inp_lport != lport) return (INPLOOKUP_MATCH_NONE); if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) return (INPLOOKUP_MATCH_WILD); if (IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr)) return (INPLOOKUP_MATCH_LADDR); return (INPLOOKUP_MATCH_NONE); } #define INP_LOOKUP_AGAIN ((struct inpcb *)(uintptr_t)-1) static struct inpcb * in6_pcblookup_hash_wild_smr(struct inpcbinfo *pcbinfo, const struct in6_addr *faddr, u_short fport, const struct in6_addr *laddr, u_short lport, const inp_lookup_t lockflags) { struct inpcbhead *head; struct inpcb *inp; KASSERT(SMR_ENTERED(pcbinfo->ipi_smr), ("%s: not in SMR read section", __func__)); head = &pcbinfo->ipi_hash_wild[INP_PCBHASH_WILD(lport, pcbinfo->ipi_hashmask)]; CK_LIST_FOREACH(inp, head, inp_hash_wild) { inp_lookup_match_t match; match = in6_pcblookup_wild_match(inp, laddr, lport); if (match == INPLOOKUP_MATCH_NONE) continue; if (__predict_true(inp_smr_lock(inp, lockflags))) { match = in6_pcblookup_wild_match(inp, laddr, lport); if (match != INPLOOKUP_MATCH_NONE && prison_check_ip6_locked(inp->inp_cred->cr_prison, laddr) == 0) return (inp); inp_unlock(inp, lockflags); } /* * The matching socket disappeared out from under us. Fall back * to a serialized lookup. */ return (INP_LOOKUP_AGAIN); } return (NULL); } static struct inpcb * in6_pcblookup_hash_wild_locked(struct inpcbinfo *pcbinfo, const struct in6_addr *faddr, u_short fport, const struct in6_addr *laddr, u_short lport) { struct inpcbhead *head; struct inpcb *inp, *jail_wild, *local_exact, *local_wild; INP_HASH_LOCK_ASSERT(pcbinfo); /* * Order of socket selection - we always prefer jails. * 1. jailed, non-wild. * 2. jailed, wild. * 3. non-jailed, non-wild. * 4. non-jailed, wild. 
*/ head = &pcbinfo->ipi_hash_wild[INP_PCBHASH_WILD(lport, pcbinfo->ipi_hashmask)]; local_wild = local_exact = jail_wild = NULL; CK_LIST_FOREACH(inp, head, inp_hash_wild) { inp_lookup_match_t match; bool injail; match = in6_pcblookup_wild_match(inp, laddr, lport); if (match == INPLOOKUP_MATCH_NONE) continue; injail = prison_flag(inp->inp_cred, PR_IP6) != 0; if (injail) { if (prison_check_ip6_locked( inp->inp_cred->cr_prison, laddr) != 0) continue; } else { if (local_exact != NULL) continue; } if (match == INPLOOKUP_MATCH_LADDR) { if (injail) return (inp); else local_exact = inp; } else { if (injail) jail_wild = inp; else local_wild = inp; } } if (jail_wild != NULL) return (jail_wild); if (local_exact != NULL) return (local_exact); if (local_wild != NULL) return (local_wild); return (NULL); } struct inpcb * in6_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, const struct in6_addr *faddr, u_int fport_arg, const struct in6_addr *laddr, u_int lport_arg, int lookupflags, uint8_t numa_domain) { struct inpcb *inp; u_short fport = fport_arg, lport = lport_arg; KASSERT((lookupflags & ~INPLOOKUP_WILDCARD) == 0, ("%s: invalid lookup flags %d", __func__, lookupflags)); KASSERT(!IN6_IS_ADDR_UNSPECIFIED(faddr), ("%s: invalid foreign address", __func__)); KASSERT(!IN6_IS_ADDR_UNSPECIFIED(laddr), ("%s: invalid local address", __func__)); INP_HASH_LOCK_ASSERT(pcbinfo); inp = in6_pcblookup_hash_exact(pcbinfo, faddr, fport, laddr, lport); if (inp != NULL) return (inp); if ((lookupflags & INPLOOKUP_WILDCARD) != 0) { inp = in6_pcblookup_lbgroup(pcbinfo, faddr, fport, laddr, lport, numa_domain); if (inp == NULL) { inp = in6_pcblookup_hash_wild_locked(pcbinfo, faddr, fport, laddr, lport); } } return (inp); } static struct inpcb * in6_pcblookup_hash(struct inpcbinfo *pcbinfo, const struct in6_addr *faddr, u_int fport, const struct in6_addr *laddr, u_int lport, int lookupflags, uint8_t numa_domain) { struct inpcb *inp; const inp_lookup_t lockflags = lookupflags & INPLOOKUP_LOCKMASK; KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0, ("%s: LOCKPCB not set", __func__)); INP_HASH_WLOCK(pcbinfo); inp = in6_pcblookup_hash_locked(pcbinfo, faddr, fport, laddr, lport, lookupflags & ~INPLOOKUP_LOCKMASK, numa_domain); if (inp != NULL && !inp_trylock(inp, lockflags)) { in_pcbref(inp); INP_HASH_WUNLOCK(pcbinfo); inp_lock(inp, lockflags); if (in_pcbrele(inp, lockflags)) /* XXX-MJ or retry until we get a negative match? */ inp = NULL; } else { INP_HASH_WUNLOCK(pcbinfo); } return (inp); } static struct inpcb * in6_pcblookup_hash_smr(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, u_int fport_arg, struct in6_addr *laddr, u_int lport_arg, int lookupflags, uint8_t numa_domain) { struct inpcb *inp; const inp_lookup_t lockflags = lookupflags & INPLOOKUP_LOCKMASK; const u_short fport = fport_arg, lport = lport_arg; KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0, ("%s: invalid lookup flags %d", __func__, lookupflags)); KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0, ("%s: LOCKPCB not set", __func__)); smr_enter(pcbinfo->ipi_smr); inp = in6_pcblookup_hash_exact(pcbinfo, faddr, fport, laddr, lport); if (inp != NULL) { if (__predict_true(inp_smr_lock(inp, lockflags))) { if (__predict_true(in6_pcblookup_exact_match(inp, faddr, fport, laddr, lport))) return (inp); inp_unlock(inp, lockflags); } /* * We failed to lock the inpcb, or its connection state changed * out from under us. Fall back to a precise search. 
*/ return (in6_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport, lookupflags, numa_domain)); } if ((lookupflags & INPLOOKUP_WILDCARD) != 0) { inp = in6_pcblookup_lbgroup(pcbinfo, faddr, fport, laddr, lport, numa_domain); if (inp != NULL) { if (__predict_true(inp_smr_lock(inp, lockflags))) { if (__predict_true(in6_pcblookup_wild_match(inp, laddr, lport) != INPLOOKUP_MATCH_NONE)) return (inp); inp_unlock(inp, lockflags); } inp = INP_LOOKUP_AGAIN; } else { inp = in6_pcblookup_hash_wild_smr(pcbinfo, faddr, fport, laddr, lport, lockflags); } if (inp == INP_LOOKUP_AGAIN) { return (in6_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport, lookupflags, numa_domain)); } } if (inp == NULL) smr_exit(pcbinfo->ipi_smr); return (inp); } /* * Public inpcb lookup routines, accepting a 4-tuple, and optionally, an mbuf * from which a pre-calculated hash value may be extracted. */ struct inpcb * in6_pcblookup(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, u_int fport, struct in6_addr *laddr, u_int lport, int lookupflags, struct ifnet *ifp __unused) { return (in6_pcblookup_hash_smr(pcbinfo, faddr, fport, laddr, lport, lookupflags, M_NODOM)); } struct inpcb * in6_pcblookup_mbuf(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, u_int fport, struct in6_addr *laddr, u_int lport, int lookupflags, struct ifnet *ifp __unused, struct mbuf *m) { return (in6_pcblookup_hash_smr(pcbinfo, faddr, fport, laddr, lport, lookupflags, m->m_pkthdr.numa_domain)); } void init_sin6(struct sockaddr_in6 *sin6, struct mbuf *m, int srcordst) { struct ip6_hdr *ip; ip = mtod(m, struct ip6_hdr *); bzero(sin6, sizeof(*sin6)); sin6->sin6_len = sizeof(*sin6); sin6->sin6_family = AF_INET6; sin6->sin6_addr = srcordst ? ip->ip6_dst : ip->ip6_src; (void)sa6_recoverscope(sin6); /* XXX: should catch errors... */ return; } diff --git a/sys/netinet6/in6_pcb.h b/sys/netinet6/in6_pcb.h index acccc279fdc7..b9e1cd38a60f 100644 --- a/sys/netinet6/in6_pcb.h +++ b/sys/netinet6/in6_pcb.h @@ -1,110 +1,108 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $KAME: in6_pcb.h,v 1.13 2001/02/06 09:16:53 itojun Exp $ */ /*- * Copyright (c) 1982, 1986, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _NETINET6_IN6_PCB_H_ #define _NETINET6_IN6_PCB_H_ #ifdef _KERNEL #define satosin6(sa) ((struct sockaddr_in6 *)(sa)) #define sin6tosa(sin6) ((struct sockaddr *)(sin6)) #define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa)) void in6_pcbpurgeif0(struct inpcbinfo *, struct ifnet *); void in6_losing(struct inpcb *); int in6_pcbbind(struct inpcb *, struct sockaddr_in6 *, struct ucred *); int in6_pcbconnect(struct inpcb *, struct sockaddr_in6 *, struct ucred *, bool); void in6_pcbdisconnect(struct inpcb *); struct inpcb * in6_pcblookup_local(struct inpcbinfo *, struct in6_addr *, u_short, int, struct ucred *); struct inpcb * in6_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, const struct in6_addr *faddr, u_int fport_arg, const struct in6_addr *laddr, u_int lport_arg, int lookupflags, uint8_t); struct inpcb * in6_pcblookup(struct inpcbinfo *, struct in6_addr *, u_int, struct in6_addr *, u_int, int, struct ifnet *); struct inpcb * in6_pcblookup_mbuf(struct inpcbinfo *, struct in6_addr *, u_int, struct in6_addr *, u_int, int, struct ifnet *ifp, struct mbuf *); void in6_pcbnotify(struct inpcbinfo *, struct sockaddr_in6 *, u_int, const struct sockaddr_in6 *, u_int, int, void *, struct inpcb *(*)(struct inpcb *, int)); struct inpcb * in6_rtchange(struct inpcb *, int); -struct sockaddr * - in6_sockaddr(in_port_t port, struct in6_addr *addr_p); -int in6_getpeeraddr(struct socket *so, struct sockaddr **nam); -int in6_getsockaddr(struct socket *so, struct sockaddr **nam); -int in6_mapped_sockaddr(struct socket *so, struct sockaddr **nam); -int in6_mapped_peeraddr(struct socket *so, struct sockaddr **nam); +int in6_getpeeraddr(struct socket *, struct sockaddr *); +int in6_getsockaddr(struct socket *, struct sockaddr *); +int in6_mapped_sockaddr(struct socket *, struct sockaddr *); +int in6_mapped_peeraddr(struct socket *, struct sockaddr *); int in6_selecthlim(struct inpcb *, struct ifnet *); int 
in6_pcbsetport(struct in6_addr *, struct inpcb *, struct ucred *); void init_sin6(struct sockaddr_in6 *sin6, struct mbuf *m, int); #endif /* _KERNEL */ #endif /* !_NETINET6_IN6_PCB_H_ */ diff --git a/sys/netinet6/sctp6_usrreq.c b/sys/netinet6/sctp6_usrreq.c index 6c36e4899906..1268e4990e90 100644 --- a/sys/netinet6/sctp6_usrreq.c +++ b/sys/netinet6/sctp6_usrreq.c @@ -1,1151 +1,1119 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #ifdef INET6 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int sctp6_input_with_port(struct mbuf **i_pak, int *offp, uint16_t port) { struct mbuf *m; int iphlen; uint32_t vrf_id; uint8_t ecn_bits; struct sockaddr_in6 src, dst; struct ip6_hdr *ip6; struct sctphdr *sh; struct sctp_chunkhdr *ch; int length, offset; uint8_t compute_crc; uint32_t mflowid; uint8_t mflowtype; uint16_t fibnum; iphlen = *offp; if (SCTP_GET_PKT_VRFID(*i_pak, vrf_id)) { SCTP_RELEASE_PKT(*i_pak); return (IPPROTO_DONE); } m = SCTP_HEADER_TO_CHAIN(*i_pak); #ifdef SCTP_MBUF_LOGGING /* Log in any input mbufs */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(m, SCTP_MBUF_INPUT); } #endif #ifdef SCTP_PACKET_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { sctp_packet_log(m); } #endif SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, "sctp6_input(): Packet of length %d received on %s with csum_flags 0x%b.\n", m->m_pkthdr.len, if_name(m->m_pkthdr.rcvif), (int)m->m_pkthdr.csum_flags, CSUM_BITS); mflowid = m->m_pkthdr.flowid; mflowtype = M_HASHTYPE_GET(m); fibnum = M_GETFIB(m); SCTP_STAT_INCR(sctps_recvpackets); SCTP_STAT_INCR_COUNTER64(sctps_inpackets); /* Get IP, SCTP, and first chunk header together in the first mbuf. 
*/ offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); if (m->m_len < offset) { m = m_pullup(m, offset); if (m == NULL) { SCTP_STAT_INCR(sctps_hdrops); return (IPPROTO_DONE); } } ip6 = mtod(m, struct ip6_hdr *); sh = (struct sctphdr *)(mtod(m, caddr_t)+iphlen); ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); offset -= sizeof(struct sctp_chunkhdr); memset(&src, 0, sizeof(struct sockaddr_in6)); src.sin6_family = AF_INET6; src.sin6_len = sizeof(struct sockaddr_in6); src.sin6_port = sh->src_port; src.sin6_addr = ip6->ip6_src; if (in6_setscope(&src.sin6_addr, m->m_pkthdr.rcvif, NULL) != 0) { goto out; } memset(&dst, 0, sizeof(struct sockaddr_in6)); dst.sin6_family = AF_INET6; dst.sin6_len = sizeof(struct sockaddr_in6); dst.sin6_port = sh->dest_port; dst.sin6_addr = ip6->ip6_dst; if (in6_setscope(&dst.sin6_addr, m->m_pkthdr.rcvif, NULL) != 0) { goto out; } length = ntohs(ip6->ip6_plen) + iphlen; /* Validate mbuf chain length with IP payload length. */ if (SCTP_HEADER_LEN(m) != length) { SCTPDBG(SCTP_DEBUG_INPUT1, "sctp6_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m)); SCTP_STAT_INCR(sctps_hdrops); goto out; } if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { goto out; } ecn_bits = IPV6_TRAFFIC_CLASS(ip6); if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { SCTP_STAT_INCR(sctps_recvhwcrc); compute_crc = 0; } else { SCTP_STAT_INCR(sctps_recvswcrc); compute_crc = 1; } sctp_common_input_processing(&m, iphlen, offset, length, (struct sockaddr *)&src, (struct sockaddr *)&dst, sh, ch, compute_crc, ecn_bits, mflowtype, mflowid, fibnum, vrf_id, port); out: if (m) { sctp_m_freem(m); } return (IPPROTO_DONE); } int sctp6_input(struct mbuf **i_pak, int *offp, int proto SCTP_UNUSED) { return (sctp6_input_with_port(i_pak, offp, 0)); } void sctp6_notify(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, uint8_t icmp6_type, uint8_t icmp6_code, uint32_t next_mtu) { int timer_stopped; switch (icmp6_type) { case ICMP6_DST_UNREACH: if ((icmp6_code == ICMP6_DST_UNREACH_NOROUTE) || (icmp6_code == ICMP6_DST_UNREACH_ADMIN) || (icmp6_code == ICMP6_DST_UNREACH_BEYONDSCOPE) || (icmp6_code == ICMP6_DST_UNREACH_ADDR)) { /* Mark the net unreachable. */ if (net->dest_state & SCTP_ADDR_REACHABLE) { /* Ok that destination is not reachable */ net->dest_state &= ~SCTP_ADDR_REACHABLE; net->dest_state &= ~SCTP_ADDR_PF; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, (void *)net, SCTP_SO_NOT_LOCKED); } } SCTP_TCB_UNLOCK(stcb); break; case ICMP6_PARAM_PROB: /* Treat it like an ABORT. */ if (icmp6_code == ICMP6_PARAMPROB_NEXTHEADER) { sctp_abort_notification(stcb, true, false, 0, NULL, SCTP_SO_NOT_LOCKED); (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); } else { SCTP_TCB_UNLOCK(stcb); } break; case ICMP6_PACKET_TOO_BIG: if (net->dest_state & SCTP_ADDR_NO_PMTUD) { SCTP_TCB_UNLOCK(stcb); break; } if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { timer_stopped = 1; sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1); } else { timer_stopped = 0; } /* Update the path MTU. 
*/ if (net->port) { next_mtu -= sizeof(struct udphdr); } if (net->mtu > next_mtu) { net->mtu = next_mtu; if (net->port) { sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu + sizeof(struct udphdr)); } else { sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu); } } /* Update the association MTU */ if (stcb->asoc.smallest_mtu > next_mtu) { sctp_pathmtu_adjustment(stcb, next_mtu, true); } /* Finally, start the PMTU timer if it was running before. */ if (timer_stopped) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } SCTP_TCB_UNLOCK(stcb); break; default: SCTP_TCB_UNLOCK(stcb); break; } } void sctp6_ctlinput(struct ip6ctlparam *ip6cp) { struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; struct sctphdr sh; struct sockaddr_in6 src, dst; if (icmp6_errmap(ip6cp->ip6c_icmp6) == 0) { return; } /* * Check if we can safely examine the ports and the verification tag * of the SCTP common header. */ if (ip6cp->ip6c_m->m_pkthdr.len < (int32_t)(ip6cp->ip6c_off + offsetof(struct sctphdr, checksum))) { return; } /* Copy out the port numbers and the verification tag. */ memset(&sh, 0, sizeof(sh)); m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), (caddr_t)&sh); memset(&src, 0, sizeof(struct sockaddr_in6)); src.sin6_family = AF_INET6; src.sin6_len = sizeof(struct sockaddr_in6); src.sin6_port = sh.src_port; src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { return; } memset(&dst, 0, sizeof(struct sockaddr_in6)); dst.sin6_family = AF_INET6; dst.sin6_len = sizeof(struct sockaddr_in6); dst.sin6_port = sh.dest_port; dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { return; } inp = NULL; net = NULL; stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, (struct sockaddr *)&src, &inp, &net, 1, SCTP_DEFAULT_VRFID); if ((stcb != NULL) && (net != NULL) && (inp != NULL)) { /* Check the verification tag */ if (ntohl(sh.v_tag) != 0) { /* * This must be the verification tag used for * sending out packets. We don't consider packets * reflecting the verification tag. */ if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { SCTP_TCB_UNLOCK(stcb); return; } } else { if (ip6cp->ip6c_m->m_pkthdr.len >= ip6cp->ip6c_off + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) + offsetof(struct sctp_init, a_rwnd)) { /* * In this case we can check if we got an * INIT chunk and if the initiate tag * matches. 
*/ uint32_t initiate_tag; uint8_t chunk_type; m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off + sizeof(struct sctphdr), sizeof(uint8_t), (caddr_t)&chunk_type); m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr), sizeof(uint32_t), (caddr_t)&initiate_tag); if ((chunk_type != SCTP_INITIATION) || (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { SCTP_TCB_UNLOCK(stcb); return; } } else { SCTP_TCB_UNLOCK(stcb); return; } } sctp6_notify(inp, stcb, net, ip6cp->ip6c_icmp6->icmp6_type, ip6cp->ip6c_icmp6->icmp6_code, ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); } else { if ((stcb == NULL) && (inp != NULL)) { /* reduce inp's ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } if (stcb) { SCTP_TCB_UNLOCK(stcb); } } } /* * this routine can probably be collasped into the one in sctp_userreq.c * since they do the same thing and now we lookup with a sockaddr */ static int sctp6_getcred(SYSCTL_HANDLER_ARGS) { struct xucred xuc; struct sockaddr_in6 addrs[2]; struct sctp_inpcb *inp; struct sctp_nets *net; struct sctp_tcb *stcb; int error; uint32_t vrf_id; vrf_id = SCTP_DEFAULT_VRFID; error = priv_check(req->td, PRIV_NETINET_GETCRED); if (error) return (error); if (req->newlen != sizeof(addrs)) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (req->oldlen != sizeof(struct ucred)) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } error = SYSCTL_IN(req, addrs, sizeof(addrs)); if (error) return (error); stcb = sctp_findassociation_addr_sa(sin6tosa(&addrs[1]), sin6tosa(&addrs[0]), &inp, &net, 1, vrf_id); if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { if ((inp != NULL) && (stcb == NULL)) { /* reduce ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); goto cred_can_cont; } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); error = ENOENT; goto out; } SCTP_TCB_UNLOCK(stcb); /* * We use the write lock here, only since in the error leg we need * it. If we used RLOCK, then we would have to * wlock/decr/unlock/rlock. Which in theory could create a hole. * Better to use higher wlock. */ SCTP_INP_WLOCK(inp); cred_can_cont: error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); if (error) { SCTP_INP_WUNLOCK(inp); goto out; } cru2x(inp->sctp_socket->so_cred, &xuc); SCTP_INP_WUNLOCK(inp); error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); out: return (error); } SYSCTL_PROC(_net_inet6_sctp6, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sctp6_getcred, "S,ucred", "Get the ucred of a SCTP6 connection"); static int sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED) { int error; struct sctp_inpcb *inp; uint32_t vrf_id = SCTP_DEFAULT_VRFID; inp = (struct sctp_inpcb *)so->so_pcb; if (inp != NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace)); if (error) return (error); } error = sctp_inpcb_alloc(so, vrf_id); if (error) return (error); inp = (struct sctp_inpcb *)so->so_pcb; SCTP_INP_WLOCK(inp); inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_V6; /* I'm v6! */ inp->ip_inp.inp.inp_vflag |= INP_IPV6; inp->ip_inp.inp.in6p_hops = -1; /* use kernel default */ inp->ip_inp.inp.in6p_cksum = -1; /* just to be sure */ #ifdef INET /* * XXX: ugly!! 
IPv4 TTL initialization is necessary for an IPv6 * socket as well, because the socket may be bound to an IPv6 * wildcard address, which may match an IPv4-mapped IPv6 address. */ inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl); #endif SCTP_INP_WUNLOCK(inp); return (0); } static int sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p) { struct sctp_inpcb *inp; int error; u_char vflagsav; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (addr) { switch (addr->sa_family) { #ifdef INET case AF_INET: if (addr->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif #ifdef INET6 case AF_INET6: if (addr->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } } vflagsav = inp->ip_inp.inp.inp_vflag; inp->ip_inp.inp.inp_vflag &= ~INP_IPV4; inp->ip_inp.inp.inp_vflag |= INP_IPV6; if ((addr != NULL) && (SCTP_IPV6_V6ONLY(inp) == 0)) { switch (addr->sa_family) { #ifdef INET case AF_INET: /* binding v4 addr to v6 socket, so reset flags */ inp->ip_inp.inp.inp_vflag |= INP_IPV4; inp->ip_inp.inp.inp_vflag &= ~INP_IPV6; break; #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6_p; sin6_p = (struct sockaddr_in6 *)addr; if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr)) { inp->ip_inp.inp.inp_vflag |= INP_IPV4; } #ifdef INET if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) { struct sockaddr_in sin; in6_sin6_2_sin(&sin, sin6_p); inp->ip_inp.inp.inp_vflag |= INP_IPV4; inp->ip_inp.inp.inp_vflag &= ~INP_IPV6; error = sctp_inpcb_bind(so, (struct sockaddr *)&sin, NULL, p); goto out; } #endif break; } #endif default: break; } } else if (addr != NULL) { struct sockaddr_in6 *sin6_p; /* IPV6_V6ONLY socket */ #ifdef INET if (addr->sa_family == AF_INET) { /* can't bind v4 addr to v6 only socket! */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); error = EINVAL; goto out; } #endif sin6_p = (struct sockaddr_in6 *)addr; if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) { /* can't bind v4-mapped addrs either! */ /* NOTE: we don't support SIIT */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); error = EINVAL; goto out; } } error = sctp_inpcb_bind(so, addr, NULL, p); out: if (error != 0) inp->ip_inp.inp.inp_vflag = vflagsav; return (error); } static void sctp6_close(struct socket *so) { sctp_close(so); } /* This could be made common with sctp_detach() since they are identical */ int sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p); static int sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p) { struct sctp_inpcb *inp; #ifdef INET struct sockaddr_in6 *sin6; #endif /* INET */ /* No SPL needed since sctp_output does this */ inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } /* * For the TCP model we may get a NULL addr, if we are a connected * socket thats ok. 
*/ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) && (addr == NULL)) { goto connected_type; } if (addr == NULL) { SCTP_RELEASE_PKT(m); if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EDESTADDRREQ); return (EDESTADDRREQ); } switch (addr->sa_family) { #ifdef INET case AF_INET: if (addr->sa_len != sizeof(struct sockaddr_in)) { if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif #ifdef INET6 case AF_INET6: if (addr->sa_len != sizeof(struct sockaddr_in6)) { if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif default: if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } #ifdef INET sin6 = (struct sockaddr_in6 *)addr; if (SCTP_IPV6_V6ONLY(inp)) { /* * if IPV6_V6ONLY flag, we discard datagrams destined to a * v4 addr or v4-mapped addr */ if (addr->sa_family == AF_INET) { if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } } if ((addr->sa_family == AF_INET6) && IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { struct sockaddr_in sin; /* convert v4-mapped into v4 addr and send */ in6_sin6_2_sin(&sin, sin6); return (sctp_sendm(so, flags, m, (struct sockaddr *)&sin, control, p)); } #endif /* INET */ connected_type: /* now what about control */ if (control) { if (inp->control) { SCTP_PRINTF("huh? control set?\n"); SCTP_RELEASE_PKT(inp->control); inp->control = NULL; } inp->control = control; } /* Place the data */ if (inp->pkt) { SCTP_BUF_NEXT(inp->pkt_last) = m; inp->pkt_last = m; } else { inp->pkt_last = inp->pkt = m; } if ( /* FreeBSD and MacOSX uses a flag passed */ ((flags & PRUS_MORETOCOME) == 0) ) { /* * note with the current version this code will only be used * by OpenBSD, NetBSD and FreeBSD have methods for * re-defining sosend() to use sctp_sosend(). One can * optionaly switch back to this code (by changing back the * defininitions but this is not advisable. */ struct epoch_tracker et; int ret; NET_EPOCH_ENTER(et); ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); NET_EPOCH_EXIT(et); inp->pkt = NULL; inp->control = NULL; return (ret); } else { return (0); } } static int sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p) { struct epoch_tracker et; uint32_t vrf_id; int error = 0; struct sctp_inpcb *inp; struct sctp_tcb *stcb; #ifdef INET struct sockaddr_in6 *sin6; union sctp_sockstore store; #endif inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET); return (ECONNRESET); /* I made the same as TCP since we are * not setup? 
*/ } if (addr == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } switch (addr->sa_family) { #ifdef INET case AF_INET: if (addr->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif #ifdef INET6 case AF_INET6: if (addr->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } vrf_id = inp->def_vrf_id; SCTP_ASOC_CREATE_LOCK(inp); SCTP_INP_RLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == SCTP_PCB_FLAGS_UNBOUND) { /* Bind a ephemeral port */ SCTP_INP_RUNLOCK(inp); error = sctp6_bind(so, NULL, p); if (error) { SCTP_ASOC_CREATE_UNLOCK(inp); return (error); } SCTP_INP_RLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ SCTP_INP_RUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EADDRINUSE); return (EADDRINUSE); } #ifdef INET sin6 = (struct sockaddr_in6 *)addr; if (SCTP_IPV6_V6ONLY(inp)) { /* * if IPV6_V6ONLY flag, ignore connections destined to a v4 * addr or v4-mapped addr */ if (addr->sa_family == AF_INET) { SCTP_INP_RUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { SCTP_INP_RUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } } if ((addr->sa_family == AF_INET6) && IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { /* convert v4-mapped into v4 addr */ in6_sin6_2_sin(&store.sin, sin6); addr = &store.sa; } #endif /* INET */ /* Now do we connect? */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else { SCTP_INP_RUNLOCK(inp); SCTP_INP_WLOCK(inp); SCTP_INP_INCR_REF(inp); SCTP_INP_WUNLOCK(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); if (stcb == NULL) { SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } } if (stcb != NULL) { /* Already have or am bring up an association */ SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EALREADY); return (EALREADY); } /* We are GOOD to go */ stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id, inp->sctp_ep.pre_open_stream_count, inp->sctp_ep.port, p, SCTP_INITIALIZE_AUTH_PARAMS); SCTP_ASOC_CREATE_UNLOCK(inp); if (stcb == NULL) { /* Gak! no memory */ return (error); } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); NET_EPOCH_ENTER(et); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); NET_EPOCH_EXIT(et); return (error); } static int -sctp6_getaddr(struct socket *so, struct sockaddr **addr) +sctp6_getaddr(struct socket *so, struct sockaddr *sa) { - struct sockaddr_in6 *sin6; + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa; struct sctp_inpcb *inp; uint32_t vrf_id; struct sctp_ifa *sctp_ifa; - int error; - /* - * Do the malloc first in case it blocks. 
- */ - SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof(*sin6)); - if (sin6 == NULL) - return (ENOMEM); - sin6->sin6_family = AF_INET6; - sin6->sin6_len = sizeof(*sin6); + *sin6 = (struct sockaddr_in6 ){ + .sin6_len = sizeof(struct sockaddr_in6), + .sin6_family = AF_INET6, + }; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { - SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET); return (ECONNRESET); } SCTP_INP_RLOCK(inp); sin6->sin6_port = inp->sctp_lport; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* For the bound all case you get back 0 */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { struct sctp_tcb *stcb; struct sockaddr_in6 *sin_a6; struct sctp_nets *net; int fnd; stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_RUNLOCK(inp); - SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); return (ENOENT); } fnd = 0; sin_a6 = NULL; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr; if (sin_a6 == NULL) /* this will make coverity happy */ continue; if (sin_a6->sin6_family == AF_INET6) { fnd = 1; break; } } if ((!fnd) || (sin_a6 == NULL)) { /* punt */ SCTP_INP_RUNLOCK(inp); - SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); return (ENOENT); } vrf_id = inp->def_vrf_id; sctp_ifa = sctp_source_address_selection(inp, stcb, (sctp_route_t *)&net->ro, net, 0, vrf_id); if (sctp_ifa) { sin6->sin6_addr = sctp_ifa->address.sin6.sin6_addr; } } else { /* For the bound all case you get back 0 */ memset(&sin6->sin6_addr, 0, sizeof(sin6->sin6_addr)); } } else { /* Take the first IPv6 address in the list */ struct sctp_laddr *laddr; int fnd = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa->address.sa.sa_family == AF_INET6) { struct sockaddr_in6 *sin_a; sin_a = &laddr->ifa->address.sin6; sin6->sin6_addr = sin_a->sin6_addr; fnd = 1; break; } } if (!fnd) { - SCTP_FREE_SONAME(sin6); SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); return (ENOENT); } } SCTP_INP_RUNLOCK(inp); /* Scoping things for v6 */ if ((error = sa6_recoverscope(sin6)) != 0) { - SCTP_FREE_SONAME(sin6); return (error); } - (*addr) = (struct sockaddr *)sin6; + return (0); } static int -sctp6_peeraddr(struct socket *so, struct sockaddr **addr) +sctp6_peeraddr(struct socket *so, struct sockaddr *sa) { - struct sockaddr_in6 *sin6; + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa; int fnd; struct sockaddr_in6 *sin_a6; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; int error; - /* Do the malloc first in case it blocks. 
*/ - SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); - if (sin6 == NULL) - return (ENOMEM); - sin6->sin6_family = AF_INET6; - sin6->sin6_len = sizeof(*sin6); + *sin6 = (struct sockaddr_in6 ){ + .sin6_len = sizeof(struct sockaddr_in6), + .sin6_family = AF_INET6, + }; inp = (struct sctp_inpcb *)so->so_pcb; if ((inp == NULL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { /* UDP type and listeners will drop out here */ - SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOTCONN); return (ENOTCONN); } SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); if (stcb == NULL) { - SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET); return (ECONNRESET); } fnd = 0; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr; if (sin_a6->sin6_family == AF_INET6) { fnd = 1; sin6->sin6_port = stcb->rport; sin6->sin6_addr = sin_a6->sin6_addr; break; } } SCTP_TCB_UNLOCK(stcb); if (!fnd) { /* No IPv4 address */ - SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); return (ENOENT); } if ((error = sa6_recoverscope(sin6)) != 0) { - SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, error); return (error); } - *addr = (struct sockaddr *)sin6; + return (0); } static int -sctp6_in6getaddr(struct socket *so, struct sockaddr **nam) +sctp6_in6getaddr(struct socket *so, struct sockaddr *sa) { struct inpcb *inp = sotoinpcb(so); int error; if (inp == NULL) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } /* allow v6 addresses precedence */ - error = sctp6_getaddr(so, nam); + error = sctp6_getaddr(so, sa); #ifdef INET if (error) { - struct sockaddr_in6 *sin6; + struct sockaddr_in sin; /* try v4 next if v6 failed */ - error = sctp_ingetaddr(so, nam); - if (error) { + error = sctp_ingetaddr(so, (struct sockaddr *)&sin); + if (error) return (error); - } - SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); - if (sin6 == NULL) { - SCTP_FREE_SONAME(*nam); - return (ENOMEM); - } - in6_sin_2_v4mapsin6((struct sockaddr_in *)*nam, sin6); - SCTP_FREE_SONAME(*nam); - *nam = (struct sockaddr *)sin6; + in6_sin_2_v4mapsin6(&sin, (struct sockaddr_in6 *)sa); } #endif return (error); } static int -sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam) +sctp6_getpeeraddr(struct socket *so, struct sockaddr *sa) { struct inpcb *inp = sotoinpcb(so); int error; if (inp == NULL) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } /* allow v6 addresses precedence */ - error = sctp6_peeraddr(so, nam); + error = sctp6_peeraddr(so, sa); #ifdef INET if (error) { - struct sockaddr_in6 *sin6; + struct sockaddr_in sin; /* try v4 next if v6 failed */ - error = sctp_peeraddr(so, nam); - if (error) { + error = sctp_peeraddr(so, (struct sockaddr *)&sin); + if (error) return (error); - } - SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); - if (sin6 == NULL) { - SCTP_FREE_SONAME(*nam); - return (ENOMEM); - } - in6_sin_2_v4mapsin6((struct sockaddr_in *)*nam, sin6); - SCTP_FREE_SONAME(*nam); - *nam = (struct sockaddr *)sin6; + in6_sin_2_v4mapsin6(&sin, (struct sockaddr_in6 *)sa); } #endif return (error); } #define SCTP6_PROTOSW \ .pr_protocol = IPPROTO_SCTP, \ .pr_ctloutput = sctp_ctloutput, \ .pr_abort = sctp_abort, \ .pr_accept = sctp_accept, \ .pr_attach = 
sctp6_attach, \ .pr_bind = sctp6_bind, \ .pr_connect = sctp6_connect, \ .pr_control = in6_control, \ .pr_close = sctp6_close, \ .pr_detach = sctp6_close, \ .pr_sopoll = sopoll_generic, \ .pr_flush = sctp_flush, \ .pr_disconnect = sctp_disconnect, \ .pr_listen = sctp_listen, \ .pr_peeraddr = sctp6_getpeeraddr, \ .pr_send = sctp6_send, \ .pr_shutdown = sctp_shutdown, \ .pr_sockaddr = sctp6_in6getaddr, \ .pr_sosend = sctp_sosend, \ .pr_soreceive = sctp_soreceive struct protosw sctp6_seqpacket_protosw = { .pr_type = SOCK_SEQPACKET, .pr_flags = PR_WANTRCVD, SCTP6_PROTOSW }; struct protosw sctp6_stream_protosw = { .pr_type = SOCK_STREAM, .pr_flags = PR_CONNREQUIRED | PR_WANTRCVD, SCTP6_PROTOSW }; #endif diff --git a/sys/netlink/netlink_domain.c b/sys/netlink/netlink_domain.c index 7b2bbd39447d..1b48ac65cec1 100644 --- a/sys/netlink/netlink_domain.c +++ b/sys/netlink/netlink_domain.c @@ -1,808 +1,799 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Ng Peng Nam Sean * Copyright (c) 2022 Alexander V. Chernikov * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This file contains socket and protocol bindings for netlink. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* priv_check */ #include #include #include #define DEBUG_MOD_NAME nl_domain #define DEBUG_MAX_LEVEL LOG_DEBUG3 #include _DECLARE_DEBUG(LOG_INFO); _Static_assert((NLP_MAX_GROUPS % 64) == 0, "NLP_MAX_GROUPS has to be multiple of 64"); _Static_assert(NLP_MAX_GROUPS >= 64, "NLP_MAX_GROUPS has to be at least 64"); #define NLCTL_TRACKER struct rm_priotracker nl_tracker #define NLCTL_RLOCK(_ctl) rm_rlock(&((_ctl)->ctl_lock), &nl_tracker) #define NLCTL_RUNLOCK(_ctl) rm_runlock(&((_ctl)->ctl_lock), &nl_tracker) #define NLCTL_WLOCK(_ctl) rm_wlock(&((_ctl)->ctl_lock)) #define NLCTL_WUNLOCK(_ctl) rm_wunlock(&((_ctl)->ctl_lock)) static u_long nl_sendspace = NLSNDQ; SYSCTL_ULONG(_net_netlink, OID_AUTO, sendspace, CTLFLAG_RW, &nl_sendspace, 0, "Default netlink socket send space"); static u_long nl_recvspace = NLSNDQ; SYSCTL_ULONG(_net_netlink, OID_AUTO, recvspace, CTLFLAG_RW, &nl_recvspace, 0, "Default netlink socket receive space"); extern u_long sb_max_adj; static u_long nl_maxsockbuf = 512 * 1024 * 1024; /* 512M, XXX: init based on physmem */ static int sysctl_handle_nl_maxsockbuf(SYSCTL_HANDLER_ARGS); SYSCTL_OID(_net_netlink, OID_AUTO, nl_maxsockbuf, CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, &nl_maxsockbuf, 0, sysctl_handle_nl_maxsockbuf, "LU", "Maximum Netlink socket buffer size"); static unsigned int osd_slot_id = 0; void nl_osd_register(void) { osd_slot_id = osd_register(OSD_THREAD, NULL, NULL); } void nl_osd_unregister(void) { osd_deregister(OSD_THREAD, osd_slot_id); } struct nlpcb * _nl_get_thread_nlp(struct thread *td) { return (osd_get(OSD_THREAD, &td->td_osd, osd_slot_id)); } void nl_set_thread_nlp(struct thread *td, struct nlpcb *nlp) { NLP_LOG(LOG_DEBUG2, nlp, "Set thread %p nlp to %p (slot %u)", td, nlp, osd_slot_id); if (osd_set(OSD_THREAD, &td->td_osd, osd_slot_id, nlp) == 0) return; /* Failed, need to realloc */ void **rsv = osd_reserve(osd_slot_id); osd_set_reserved(OSD_THREAD, &td->td_osd, osd_slot_id, rsv, nlp); } /* * Looks up a nlpcb struct based on the @portid. Need to claim nlsock_mtx. 
* Returns nlpcb pointer if present else NULL */ static struct nlpcb * nl_port_lookup(uint32_t port_id) { struct nlpcb *nlp; CK_LIST_FOREACH(nlp, &V_nl_ctl->ctl_port_head, nl_port_next) { if (nlp->nl_port == port_id) return (nlp); } return (NULL); } static void nl_add_group_locked(struct nlpcb *nlp, unsigned int group_id) { MPASS(group_id <= NLP_MAX_GROUPS); --group_id; /* TODO: add family handler callback */ if (!nlp_unconstrained_vnet(nlp)) return; nlp->nl_groups[group_id / 64] |= (uint64_t)1 << (group_id % 64); } static void nl_del_group_locked(struct nlpcb *nlp, unsigned int group_id) { MPASS(group_id <= NLP_MAX_GROUPS); --group_id; nlp->nl_groups[group_id / 64] &= ~((uint64_t)1 << (group_id % 64)); } static bool nl_isset_group_locked(struct nlpcb *nlp, unsigned int group_id) { MPASS(group_id <= NLP_MAX_GROUPS); --group_id; return (nlp->nl_groups[group_id / 64] & ((uint64_t)1 << (group_id % 64))); } static uint32_t nl_get_groups_compat(struct nlpcb *nlp) { uint32_t groups_mask = 0; for (int i = 0; i < 32; i++) { if (nl_isset_group_locked(nlp, i + 1)) groups_mask |= (1 << i); } return (groups_mask); } static void nl_send_one_group(struct mbuf *m, struct nlpcb *nlp, int num_messages, int io_flags) { if (__predict_false(nlp->nl_flags & NLF_MSG_INFO)) nl_add_msg_info(m); nl_send_one(m, nlp, num_messages, io_flags); } /* * Broadcasts message @m to the protocol @proto group specified by @group_id */ void nl_send_group(struct mbuf *m, int num_messages, int proto, int group_id) { struct nlpcb *nlp_last = NULL; struct nlpcb *nlp; NLCTL_TRACKER; IF_DEBUG_LEVEL(LOG_DEBUG2) { struct nlmsghdr *hdr = mtod(m, struct nlmsghdr *); NL_LOG(LOG_DEBUG2, "MCAST mbuf len %u msg type %d len %u to group %d/%d", m->m_len, hdr->nlmsg_type, hdr->nlmsg_len, proto, group_id); } struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); if (__predict_false(ctl == NULL)) { /* * Can be the case when notification is sent within VNET * which doesn't have any netlink sockets. */ m_freem(m); return; } NLCTL_RLOCK(ctl); int io_flags = NL_IOF_UNTRANSLATED; CK_LIST_FOREACH(nlp, &ctl->ctl_pcb_head, nl_next) { if (nl_isset_group_locked(nlp, group_id) && nlp->nl_proto == proto) { if (nlp_last != NULL) { struct mbuf *m_copy; m_copy = m_copym(m, 0, M_COPYALL, M_NOWAIT); if (m_copy != NULL) nl_send_one_group(m_copy, nlp_last, num_messages, io_flags); else { NLP_LOCK(nlp_last); if (nlp_last->nl_socket != NULL) sorwakeup(nlp_last->nl_socket); NLP_UNLOCK(nlp_last); } } nlp_last = nlp; } } if (nlp_last != NULL) nl_send_one_group(m, nlp_last, num_messages, io_flags); else m_freem(m); NLCTL_RUNLOCK(ctl); } bool nl_has_listeners(int netlink_family, uint32_t groups_mask) { return (V_nl_ctl != NULL); } static uint32_t nl_find_port(void) { /* * app can open multiple netlink sockets. * Start with current pid, if already taken, * try random numbers in 65k..256k+65k space, * avoiding clash with pids. 
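For reference, the port-assignment strategy described in the comment above (and implemented by nl_find_port()/nl_bind_locked() below) is what a user-space caller exercises by binding with nl_pid == 0 and reading the address back. A minimal sketch, assuming the userland <netlink/netlink.h> header and the NETLINK_ROUTE protocol, neither of which is part of this change:

#include <sys/socket.h>
#include <netlink/netlink.h>
#include <err.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
        struct sockaddr_nl snl;
        socklen_t len;
        int fd;

        if ((fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE)) == -1)
                err(1, "socket");
        memset(&snl, 0, sizeof(snl));
        snl.nl_len = sizeof(snl);
        snl.nl_family = AF_NETLINK;
        snl.nl_pid = 0;                 /* 0: let the kernel pick a port id */
        if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) == -1)
                err(1, "bind");
        len = sizeof(snl);
        if (getsockname(fd, (struct sockaddr *)&snl, &len) == -1)
                err(1, "getsockname");
        printf("assigned netlink port id: %u\n", (unsigned int)snl.nl_pid);
        return (0);
}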
*/ if (nl_port_lookup(curproc->p_pid) == NULL) return (curproc->p_pid); for (int i = 0; i < 16; i++) { uint32_t nl_port = (arc4random() % 65536) + 65536 * 4; if (nl_port_lookup(nl_port) == 0) return (nl_port); NL_LOG(LOG_DEBUG3, "tried %u\n", nl_port); } return (curproc->p_pid); } static int nl_bind_locked(struct nlpcb *nlp, struct sockaddr_nl *snl) { if (nlp->nl_bound) { if (nlp->nl_port != snl->nl_pid) { NL_LOG(LOG_DEBUG, "bind() failed: program pid %d " "is different from provided pid %d", nlp->nl_port, snl->nl_pid); return (EINVAL); // XXX: better error } } else { if (snl->nl_pid == 0) snl->nl_pid = nl_find_port(); if (nl_port_lookup(snl->nl_pid) != NULL) return (EADDRINUSE); nlp->nl_port = snl->nl_pid; nlp->nl_bound = true; CK_LIST_INSERT_HEAD(&V_nl_ctl->ctl_port_head, nlp, nl_port_next); } for (int i = 0; i < 32; i++) { if (snl->nl_groups & ((uint32_t)1 << i)) nl_add_group_locked(nlp, i + 1); else nl_del_group_locked(nlp, i + 1); } return (0); } static int nl_pru_attach(struct socket *so, int proto, struct thread *td) { struct nlpcb *nlp; int error; if (__predict_false(netlink_unloading != 0)) return (EAFNOSUPPORT); error = nl_verify_proto(proto); if (error != 0) return (error); bool is_linux = SV_PROC_ABI(td->td_proc) == SV_ABI_LINUX; NL_LOG(LOG_DEBUG2, "socket %p, %sPID %d: attaching socket to %s", so, is_linux ? "(linux) " : "", curproc->p_pid, nl_get_proto_name(proto)); /* Create per-VNET state on first socket init */ struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); if (ctl == NULL) ctl = vnet_nl_ctl_init(); KASSERT(V_nl_ctl != NULL, ("nl_attach: vnet_sock_init() failed")); MPASS(sotonlpcb(so) == NULL); nlp = malloc(sizeof(struct nlpcb), M_PCB, M_WAITOK | M_ZERO); error = soreserve(so, nl_sendspace, nl_recvspace); if (error != 0) { free(nlp, M_PCB); return (error); } so->so_pcb = nlp; nlp->nl_socket = so; /* Copy so_cred to avoid having socket_var.h in every header */ nlp->nl_cred = so->so_cred; nlp->nl_proto = proto; nlp->nl_process_id = curproc->p_pid; nlp->nl_linux = is_linux; nlp->nl_active = true; nlp->nl_unconstrained_vnet = !jailed_without_vnet(so->so_cred); nlp->nl_need_thread_setup = true; NLP_LOCK_INIT(nlp); refcount_init(&nlp->nl_refcount, 1); nl_init_io(nlp); nlp->nl_taskqueue = taskqueue_create("netlink_socket", M_WAITOK, taskqueue_thread_enqueue, &nlp->nl_taskqueue); TASK_INIT(&nlp->nl_task, 0, nl_taskqueue_handler, nlp); taskqueue_start_threads(&nlp->nl_taskqueue, 1, PWAIT, "netlink_socket (PID %u)", nlp->nl_process_id); NLCTL_WLOCK(ctl); /* XXX: check ctl is still alive */ CK_LIST_INSERT_HEAD(&ctl->ctl_pcb_head, nlp, nl_next); NLCTL_WUNLOCK(ctl); soisconnected(so); return (0); } static void nl_pru_abort(struct socket *so) { NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); MPASS(sotonlpcb(so) != NULL); soisdisconnected(so); } static int nl_pru_bind(struct socket *so, struct sockaddr *sa, struct thread *td) { struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); struct nlpcb *nlp = sotonlpcb(so); struct sockaddr_nl *snl = (struct sockaddr_nl *)sa; int error; NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); if (snl->nl_len != sizeof(*snl)) { NL_LOG(LOG_DEBUG, "socket %p, wrong sizeof(), ignoring bind()", so); return (EINVAL); } NLCTL_WLOCK(ctl); NLP_LOCK(nlp); error = nl_bind_locked(nlp, snl); NLP_UNLOCK(nlp); NLCTL_WUNLOCK(ctl); NL_LOG(LOG_DEBUG2, "socket %p, bind() to %u, groups %u, error %d", so, snl->nl_pid, snl->nl_groups, error); return (error); } static int nl_assign_port(struct nlpcb *nlp, uint32_t port_id) { struct nl_control *ctl 
= atomic_load_ptr(&V_nl_ctl); struct sockaddr_nl snl = { .nl_pid = port_id, }; int error; NLCTL_WLOCK(ctl); NLP_LOCK(nlp); snl.nl_groups = nl_get_groups_compat(nlp); error = nl_bind_locked(nlp, &snl); NLP_UNLOCK(nlp); NLCTL_WUNLOCK(ctl); NL_LOG(LOG_DEBUG3, "socket %p, port assign: %d, error: %d", nlp->nl_socket, port_id, error); return (error); } /* * nl_autobind_port binds a unused portid to @nlp * @nlp: pcb data for the netlink socket * @candidate_id: first id to consider */ static int nl_autobind_port(struct nlpcb *nlp, uint32_t candidate_id) { struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); uint32_t port_id = candidate_id; NLCTL_TRACKER; bool exist; int error = EADDRINUSE; for (int i = 0; i < 10; i++) { NL_LOG(LOG_DEBUG3, "socket %p, trying to assign port %d", nlp->nl_socket, port_id); NLCTL_RLOCK(ctl); exist = nl_port_lookup(port_id) != 0; NLCTL_RUNLOCK(ctl); if (!exist) { error = nl_assign_port(nlp, port_id); if (error != EADDRINUSE) break; } port_id++; } NL_LOG(LOG_DEBUG3, "socket %p, autobind to %d, error: %d", nlp->nl_socket, port_id, error); return (error); } static int nl_pru_connect(struct socket *so, struct sockaddr *sa, struct thread *td) { struct sockaddr_nl *snl = (struct sockaddr_nl *)sa; struct nlpcb *nlp; NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); if (snl->nl_len != sizeof(*snl)) { NL_LOG(LOG_DEBUG, "socket %p, wrong sizeof(), ignoring bind()", so); return (EINVAL); } nlp = sotonlpcb(so); if (!nlp->nl_bound) { int error = nl_autobind_port(nlp, td->td_proc->p_pid); if (error != 0) { NL_LOG(LOG_DEBUG, "socket %p, nl_autobind() failed: %d", so, error); return (error); } } /* XXX: Handle socket flags & multicast */ soisconnected(so); NL_LOG(LOG_DEBUG2, "socket %p, connect to %u", so, snl->nl_pid); return (0); } static void destroy_nlpcb(struct nlpcb *nlp) { NLP_LOCK(nlp); nl_free_io(nlp); NLP_LOCK_DESTROY(nlp); free(nlp, M_PCB); } static void destroy_nlpcb_epoch(epoch_context_t ctx) { struct nlpcb *nlp; nlp = __containerof(ctx, struct nlpcb, nl_epoch_ctx); destroy_nlpcb(nlp); } static void nl_pru_detach(struct socket *so) { struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); MPASS(sotonlpcb(so) != NULL); struct nlpcb *nlp; NL_LOG(LOG_DEBUG2, "detaching socket %p, PID %d", so, curproc->p_pid); nlp = sotonlpcb(so); /* Mark as inactive so no new work can be enqueued */ NLP_LOCK(nlp); bool was_bound = nlp->nl_bound; nlp->nl_active = false; NLP_UNLOCK(nlp); /* Wait till all scheduled work has been completed */ taskqueue_drain_all(nlp->nl_taskqueue); taskqueue_free(nlp->nl_taskqueue); NLCTL_WLOCK(ctl); NLP_LOCK(nlp); if (was_bound) { CK_LIST_REMOVE(nlp, nl_port_next); NL_LOG(LOG_DEBUG3, "socket %p, unlinking bound pid %u", so, nlp->nl_port); } CK_LIST_REMOVE(nlp, nl_next); nlp->nl_socket = NULL; NLP_UNLOCK(nlp); NLCTL_WUNLOCK(ctl); so->so_pcb = NULL; NL_LOG(LOG_DEBUG3, "socket %p, detached", so); /* XXX: is delayed free needed? 
*/ NET_EPOCH_CALL(destroy_nlpcb_epoch, &nlp->nl_epoch_ctx); } static int nl_pru_disconnect(struct socket *so) { NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); MPASS(sotonlpcb(so) != NULL); return (ENOTCONN); } -static int -nl_pru_peeraddr(struct socket *so, struct sockaddr **sa) -{ - NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); - MPASS(sotonlpcb(so) != NULL); - return (ENOTCONN); -} - static int nl_pru_shutdown(struct socket *so) { NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); MPASS(sotonlpcb(so) != NULL); socantsendmore(so); return (0); } static int -nl_pru_sockaddr(struct socket *so, struct sockaddr **sa) +nl_sockaddr(struct socket *so, struct sockaddr *sa) { - struct sockaddr_nl *snl; - snl = malloc(sizeof(struct sockaddr_nl), M_SONAME, M_WAITOK | M_ZERO); - /* TODO: set other fields */ - snl->nl_len = sizeof(struct sockaddr_nl); - snl->nl_family = AF_NETLINK; - snl->nl_pid = sotonlpcb(so)->nl_port; - *sa = (struct sockaddr *)snl; + *(struct sockaddr_nl *)sa = (struct sockaddr_nl ){ + /* TODO: set other fields */ + .nl_len = sizeof(struct sockaddr_nl), + .nl_family = AF_NETLINK, + .nl_pid = sotonlpcb(so)->nl_port, + }; + return (0); } static void nl_pru_close(struct socket *so) { NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); MPASS(sotonlpcb(so) != NULL); soisdisconnected(so); } static int nl_pru_output(struct mbuf *m, struct socket *so, ...) { if (__predict_false(m == NULL || ((m->m_len < sizeof(struct nlmsghdr)) && (m = m_pullup(m, sizeof(struct nlmsghdr))) == NULL))) return (ENOBUFS); MPASS((m->m_flags & M_PKTHDR) != 0); NL_LOG(LOG_DEBUG3, "sending message to kernel async processing"); nl_receive_async(m, so); return (0); } static int nl_pru_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *sa, struct mbuf *control, struct thread *td) { NL_LOG(LOG_DEBUG2, "sending message to kernel"); if (__predict_false(control != NULL)) { if (control->m_len) { m_freem(control); return (EINVAL); } m_freem(control); } return (nl_pru_output(m, so)); } static int nl_pru_rcvd(struct socket *so, int flags) { NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); MPASS(sotonlpcb(so) != NULL); nl_on_transmit(sotonlpcb(so)); return (0); } static int nl_getoptflag(int sopt_name) { switch (sopt_name) { case NETLINK_CAP_ACK: return (NLF_CAP_ACK); case NETLINK_EXT_ACK: return (NLF_EXT_ACK); case NETLINK_GET_STRICT_CHK: return (NLF_STRICT); case NETLINK_MSG_INFO: return (NLF_MSG_INFO); } return (0); } static int nl_ctloutput(struct socket *so, struct sockopt *sopt) { struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); struct nlpcb *nlp = sotonlpcb(so); uint32_t flag; int optval, error = 0; NLCTL_TRACKER; NL_LOG(LOG_DEBUG2, "%ssockopt(%p, %d)", (sopt->sopt_dir) ? 
"set" : "get", so, sopt->sopt_name); switch (sopt->sopt_dir) { case SOPT_SET: switch (sopt->sopt_name) { case NETLINK_ADD_MEMBERSHIP: case NETLINK_DROP_MEMBERSHIP: error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); if (error != 0) break; if (optval <= 0 || optval >= NLP_MAX_GROUPS) { error = ERANGE; break; } NL_LOG(LOG_DEBUG2, "ADD/DEL group %d", (uint32_t)optval); NLCTL_WLOCK(ctl); if (sopt->sopt_name == NETLINK_ADD_MEMBERSHIP) nl_add_group_locked(nlp, optval); else nl_del_group_locked(nlp, optval); NLCTL_WUNLOCK(ctl); break; case NETLINK_CAP_ACK: case NETLINK_EXT_ACK: case NETLINK_GET_STRICT_CHK: case NETLINK_MSG_INFO: error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); if (error != 0) break; flag = nl_getoptflag(sopt->sopt_name); if ((flag == NLF_MSG_INFO) && nlp->nl_linux) { error = EINVAL; break; } NLCTL_WLOCK(ctl); if (optval != 0) nlp->nl_flags |= flag; else nlp->nl_flags &= ~flag; NLCTL_WUNLOCK(ctl); break; default: error = ENOPROTOOPT; } break; case SOPT_GET: switch (sopt->sopt_name) { case NETLINK_LIST_MEMBERSHIPS: NLCTL_RLOCK(ctl); optval = nl_get_groups_compat(nlp); NLCTL_RUNLOCK(ctl); error = sooptcopyout(sopt, &optval, sizeof(optval)); break; case NETLINK_CAP_ACK: case NETLINK_EXT_ACK: case NETLINK_GET_STRICT_CHK: case NETLINK_MSG_INFO: NLCTL_RLOCK(ctl); optval = (nlp->nl_flags & nl_getoptflag(sopt->sopt_name)) != 0; NLCTL_RUNLOCK(ctl); error = sooptcopyout(sopt, &optval, sizeof(optval)); break; default: error = ENOPROTOOPT; } break; default: error = ENOPROTOOPT; } return (error); } static int sysctl_handle_nl_maxsockbuf(SYSCTL_HANDLER_ARGS) { int error = 0; u_long tmp_maxsockbuf = nl_maxsockbuf; error = sysctl_handle_long(oidp, &tmp_maxsockbuf, arg2, req); if (error || !req->newptr) return (error); if (tmp_maxsockbuf < MSIZE + MCLBYTES) return (EINVAL); nl_maxsockbuf = tmp_maxsockbuf; return (0); } static int nl_setsbopt(struct socket *so, struct sockopt *sopt) { int error, optval; bool result; if (sopt->sopt_name != SO_RCVBUF) return (sbsetopt(so, sopt)); /* Allow to override max buffer size in certain conditions */ error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error != 0) return (error); NL_LOG(LOG_DEBUG2, "socket %p, PID %d, SO_RCVBUF=%d", so, curproc->p_pid, optval); if (optval > sb_max_adj) { if (priv_check(curthread, PRIV_NET_ROUTE) != 0) return (EPERM); } SOCK_RECVBUF_LOCK(so); result = sbreserve_locked_limit(so, SO_RCV, optval, nl_maxsockbuf, curthread); SOCK_RECVBUF_UNLOCK(so); return (result ? 
0 : ENOBUFS); } #define NETLINK_PROTOSW \ .pr_flags = PR_ATOMIC | PR_ADDR | PR_WANTRCVD, \ .pr_ctloutput = nl_ctloutput, \ .pr_setsbopt = nl_setsbopt, \ .pr_abort = nl_pru_abort, \ .pr_attach = nl_pru_attach, \ .pr_bind = nl_pru_bind, \ .pr_connect = nl_pru_connect, \ .pr_detach = nl_pru_detach, \ .pr_disconnect = nl_pru_disconnect, \ - .pr_peeraddr = nl_pru_peeraddr, \ .pr_send = nl_pru_send, \ .pr_rcvd = nl_pru_rcvd, \ .pr_shutdown = nl_pru_shutdown, \ - .pr_sockaddr = nl_pru_sockaddr, \ + .pr_sockaddr = nl_sockaddr, \ .pr_close = nl_pru_close static struct protosw netlink_raw_sw = { .pr_type = SOCK_RAW, NETLINK_PROTOSW }; static struct protosw netlink_dgram_sw = { .pr_type = SOCK_DGRAM, NETLINK_PROTOSW }; static struct domain netlinkdomain = { .dom_family = PF_NETLINK, .dom_name = "netlink", .dom_flags = DOMF_UNLOADABLE, .dom_nprotosw = 2, .dom_protosw = { &netlink_raw_sw, &netlink_dgram_sw }, }; DOMAIN_SET(netlink); diff --git a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c index a2d5f12f27ac..8def3b37bf46 100644 --- a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c +++ b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c @@ -1,1918 +1,1918 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 * The Regents of the University of California. All rights reserved. * Copyright (c) 2004 The FreeBSD Foundation. All rights reserved. * Copyright (c) 2004-2008 Robert N. M. Watson. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Excerpts taken from tcp_subr.c, tcp_usrreq.c, uipc_socket.c */ /* * * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include #include #include #include #include "sdp.h" #include #include #include #include uma_zone_t sdp_zone; struct rwlock sdp_lock; LIST_HEAD(, sdp_sock) sdp_list; struct workqueue_struct *rx_comp_wq; RW_SYSINIT(sdplockinit, &sdp_lock, "SDP lock"); #define SDP_LIST_WLOCK() rw_wlock(&sdp_lock) #define SDP_LIST_RLOCK() rw_rlock(&sdp_lock) #define SDP_LIST_WUNLOCK() rw_wunlock(&sdp_lock) #define SDP_LIST_RUNLOCK() rw_runlock(&sdp_lock) #define SDP_LIST_WLOCK_ASSERT() rw_assert(&sdp_lock, RW_WLOCKED) #define SDP_LIST_RLOCK_ASSERT() rw_assert(&sdp_lock, RW_RLOCKED) #define SDP_LIST_LOCK_ASSERT() rw_assert(&sdp_lock, RW_LOCKED) MALLOC_DEFINE(M_SDP, "sdp", "Sockets Direct Protocol"); static void sdp_stop_keepalive_timer(struct socket *so); /* * SDP protocol interface to socket abstraction. */ /* * sdp_sendspace and sdp_recvspace are the default send and receive window * sizes, respectively. */ u_long sdp_sendspace = 1024*32; u_long sdp_recvspace = 1024*64; static int sdp_count; /* * Disable async. CMA events for sockets which are being torn down. */ static void sdp_destroy_cma(struct sdp_sock *ssk) { if (ssk->id == NULL) return; rdma_destroy_id(ssk->id); ssk->id = NULL; } static int sdp_pcbbind(struct sdp_sock *ssk, struct sockaddr *nam, struct ucred *cred) { struct sockaddr_in *sin; struct sockaddr_in null; int error; SDP_WLOCK_ASSERT(ssk); if (ssk->lport != 0 || ssk->laddr != INADDR_ANY) return (EINVAL); /* rdma_bind_addr handles bind races. 
*/ SDP_WUNLOCK(ssk); if (ssk->id == NULL) ssk->id = rdma_create_id(&init_net, sdp_cma_handler, ssk, RDMA_PS_SDP, IB_QPT_RC); if (ssk->id == NULL) { SDP_WLOCK(ssk); return (ENOMEM); } if (nam == NULL) { null.sin_family = AF_INET; null.sin_len = sizeof(null); null.sin_addr.s_addr = INADDR_ANY; null.sin_port = 0; bzero(&null.sin_zero, sizeof(null.sin_zero)); nam = (struct sockaddr *)&null; } error = -rdma_bind_addr(ssk->id, nam); SDP_WLOCK(ssk); if (error == 0) { sin = (struct sockaddr_in *)&ssk->id->route.addr.src_addr; ssk->laddr = sin->sin_addr.s_addr; ssk->lport = sin->sin_port; } else sdp_destroy_cma(ssk); return (error); } static void sdp_pcbfree(struct sdp_sock *ssk) { KASSERT(ssk->socket == NULL, ("ssk %p socket still attached", ssk)); KASSERT((ssk->flags & SDP_DESTROY) == 0, ("ssk %p already destroyed", ssk)); sdp_dbg(ssk->socket, "Freeing pcb"); SDP_WLOCK_ASSERT(ssk); ssk->flags |= SDP_DESTROY; SDP_WUNLOCK(ssk); SDP_LIST_WLOCK(); sdp_count--; LIST_REMOVE(ssk, list); SDP_LIST_WUNLOCK(); crfree(ssk->cred); ssk->qp_active = 0; if (ssk->qp) { ib_destroy_qp(ssk->qp); ssk->qp = NULL; } sdp_tx_ring_destroy(ssk); sdp_rx_ring_destroy(ssk); sdp_destroy_cma(ssk); rw_destroy(&ssk->rx_ring.destroyed_lock); rw_destroy(&ssk->lock); uma_zfree(sdp_zone, ssk); } /* * Common routines to return a socket address. */ static struct sockaddr * sdp_sockaddr(in_port_t port, struct in_addr *addr_p) { struct sockaddr_in *sin; sin = malloc(sizeof *sin, M_SONAME, M_WAITOK | M_ZERO); sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_addr = *addr_p; sin->sin_port = port; return (struct sockaddr *)sin; } static int -sdp_getsockaddr(struct socket *so, struct sockaddr **nam) +sdp_getsockaddr(struct socket *so, struct sockaddr *sa) { - struct sdp_sock *ssk; - struct in_addr addr; - in_port_t port; + struct sdp_sock *ssk = sdp_sk(so); - ssk = sdp_sk(so); SDP_RLOCK(ssk); - port = ssk->lport; - addr.s_addr = ssk->laddr; + *(struct sockaddr_in *)sa = (struct sockaddr_in ){ + .sin_family = AF_INET, + .sin_len = sizeof(struct sockaddr_in), + .sin_addr.s_addr = ssk->laddr, + .sin_port = ssk->lport, + }; SDP_RUNLOCK(ssk); - *nam = sdp_sockaddr(port, &addr); - return 0; + return (0); } static int -sdp_getpeeraddr(struct socket *so, struct sockaddr **nam) +sdp_getpeeraddr(struct socket *so, struct sockaddr *sa) { - struct sdp_sock *ssk; - struct in_addr addr; - in_port_t port; + struct sdp_sock *ssk = sdp_sk(so); - ssk = sdp_sk(so); SDP_RLOCK(ssk); - port = ssk->fport; - addr.s_addr = ssk->faddr; + *(struct sockaddr_in *)sa = (struct sockaddr_in ){ + .sin_family = AF_INET, + .sin_len = sizeof(struct sockaddr_in), + .sin_addr.s_addr = ssk->faddr, + .sin_port = ssk->fport, + }; SDP_RUNLOCK(ssk); - *nam = sdp_sockaddr(port, &addr); - return 0; + return (0); } #if 0 static void sdp_apply_all(void (*func)(struct sdp_sock *, void *), void *arg) { struct sdp_sock *ssk; SDP_LIST_RLOCK(); LIST_FOREACH(ssk, &sdp_list, list) { SDP_WLOCK(ssk); func(ssk, arg); SDP_WUNLOCK(ssk); } SDP_LIST_RUNLOCK(); } #endif static void sdp_output_reset(struct sdp_sock *ssk) { struct rdma_cm_id *id; SDP_WLOCK_ASSERT(ssk); if (ssk->id) { id = ssk->id; ssk->qp_active = 0; SDP_WUNLOCK(ssk); rdma_disconnect(id); SDP_WLOCK(ssk); } ssk->state = TCPS_CLOSED; } /* * Attempt to close a SDP socket, marking it as dropped, and freeing * the socket if we hold the only reference. 
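The sdp_getsockaddr()/sdp_getpeeraddr() rewrites above follow the same convention as the SCTP and netlink hunks: pr_sockaddr()/pr_peeraddr() now fill a caller-supplied buffer instead of returning one allocated with M_SONAME. A minimal sketch of the caller side under that assumption; example_local_name() is a hypothetical helper, not code from this change:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

/*
 * Hypothetical caller of the new pr_sockaddr() contract: the caller
 * provides storage large enough for any address family, the protocol
 * fills it in, and no SCTP_FREE_SONAME()/free(..., M_SONAME) is needed.
 */
static int
example_local_name(struct socket *so, struct sockaddr_storage *ss)
{
        int error;

        bzero(ss, sizeof(*ss));
        error = so->so_proto->pr_sockaddr(so, (struct sockaddr *)ss);
        if (error != 0)
                return (error);
        /* ss->ss_len and ss->ss_family now describe the local address. */
        return (0);
}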
*/ static struct sdp_sock * sdp_closed(struct sdp_sock *ssk) { struct socket *so; SDP_WLOCK_ASSERT(ssk); ssk->flags |= SDP_DROPPED; so = ssk->socket; soisdisconnected(so); if (ssk->flags & SDP_SOCKREF) { ssk->flags &= ~SDP_SOCKREF; SDP_WUNLOCK(ssk); sorele(so); return (NULL); } return (ssk); } /* * Perform timer based shutdowns which can not operate in * callout context. */ static void sdp_shutdown_task(void *data, int pending) { struct sdp_sock *ssk; ssk = data; SDP_WLOCK(ssk); /* * I don't think this can race with another call to pcbfree() * because SDP_TIMEWAIT protects it. SDP_DESTROY may be redundant. */ if (ssk->flags & SDP_DESTROY) panic("sdp_shutdown_task: Racing with pcbfree for ssk %p", ssk); if (ssk->flags & SDP_DISCON) sdp_output_reset(ssk); /* We have to clear this so sdp_detach() will call pcbfree(). */ ssk->flags &= ~(SDP_TIMEWAIT | SDP_DREQWAIT); if ((ssk->flags & SDP_DROPPED) == 0 && sdp_closed(ssk) == NULL) return; if (ssk->socket == NULL) { sdp_pcbfree(ssk); return; } SDP_WUNLOCK(ssk); } /* * 2msl has expired, schedule the shutdown task. */ static void sdp_2msl_timeout(void *data) { struct sdp_sock *ssk; ssk = data; /* Callout canceled. */ if (!callout_active(&ssk->keep2msl)) goto out; callout_deactivate(&ssk->keep2msl); /* Should be impossible, defensive programming. */ if ((ssk->flags & SDP_TIMEWAIT) == 0) goto out; taskqueue_enqueue(taskqueue_thread, &ssk->shutdown_task); out: SDP_WUNLOCK(ssk); return; } /* * Schedule the 2msl wait timer. */ static void sdp_2msl_wait(struct sdp_sock *ssk) { SDP_WLOCK_ASSERT(ssk); ssk->flags |= SDP_TIMEWAIT; ssk->state = TCPS_TIME_WAIT; soisdisconnected(ssk->socket); callout_reset(&ssk->keep2msl, TCPTV_MSL, sdp_2msl_timeout, ssk); } /* * Timed out waiting for the final fin/ack from rdma_disconnect(). */ static void sdp_dreq_timeout(void *data) { struct sdp_sock *ssk; ssk = data; /* Callout canceled. */ if (!callout_active(&ssk->keep2msl)) goto out; /* Callout rescheduled, probably as a different timer. */ if (callout_pending(&ssk->keep2msl)) goto out; callout_deactivate(&ssk->keep2msl); if (ssk->state != TCPS_FIN_WAIT_1 && ssk->state != TCPS_LAST_ACK) goto out; if ((ssk->flags & SDP_DREQWAIT) == 0) goto out; ssk->flags &= ~SDP_DREQWAIT; ssk->flags |= SDP_DISCON; sdp_2msl_wait(ssk); ssk->qp_active = 0; out: SDP_WUNLOCK(ssk); } /* * Received the final fin/ack. Cancel the 2msl. */ void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk) { sdp_dbg(ssk->socket, "cancelling dreq wait timeout\n"); ssk->flags &= ~SDP_DREQWAIT; sdp_2msl_wait(ssk); } static int sdp_init_sock(struct socket *sk) { struct sdp_sock *ssk = sdp_sk(sk); sdp_dbg(sk, "%s\n", __func__); callout_init_rw(&ssk->keep2msl, &ssk->lock, CALLOUT_RETURNUNLOCKED); TASK_INIT(&ssk->shutdown_task, 0, sdp_shutdown_task, ssk); #ifdef SDP_ZCOPY INIT_DELAYED_WORK(&ssk->srcavail_cancel_work, srcavail_cancel_timeout); ssk->zcopy_thresh = -1; /* use global sdp_zcopy_thresh */ ssk->tx_ring.rdma_inflight = NULL; #endif atomic_set(&ssk->mseq_ack, 0); sdp_rx_ring_init(ssk); ssk->tx_ring.buffer = NULL; return 0; } /* * Allocate an sdp_sock for the socket and reserve socket buffer space. 
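The SDP timer handlers above (sdp_2msl_timeout() and sdp_dreq_timeout(); sdp_keepalive_timeout() later in the file follows the same pattern) rely on the callout being initialized with callout_init_rw(..., CALLOUT_RETURNUNLOCKED), so each handler runs with ssk->lock held and must drop it itself. A generic restatement of that idiom, with illustrative names only:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/callout.h>

struct example_softc {
        struct rwlock   lock;
        struct callout  timer;
};

static void
example_timeout(void *arg)
{
        struct example_softc *sc = arg;

        if (!callout_active(&sc->timer))        /* cancelled by callout_stop() */
                goto out;
        if (callout_pending(&sc->timer))        /* re-armed meanwhile; let that run */
                goto out;
        callout_deactivate(&sc->timer);
        /* ... timer work; sc->lock is write-held here ... */
out:
        rw_wunlock(&sc->lock);          /* CALLOUT_RETURNUNLOCKED: handler unlocks */
}

static void
example_init_and_arm(struct example_softc *sc, int timo)
{
        rw_init(&sc->lock, "example");
        callout_init_rw(&sc->timer, &sc->lock, CALLOUT_RETURNUNLOCKED);
        rw_wlock(&sc->lock);
        callout_reset(&sc->timer, timo, example_timeout, sc);
        rw_wunlock(&sc->lock);
}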
*/ static int sdp_attach(struct socket *so, int proto, struct thread *td) { struct sdp_sock *ssk; int error; ssk = sdp_sk(so); KASSERT(ssk == NULL, ("sdp_attach: ssk already set on so %p", so)); if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { error = soreserve(so, sdp_sendspace, sdp_recvspace); if (error) return (error); } so->so_rcv.sb_flags |= SB_AUTOSIZE; so->so_snd.sb_flags |= SB_AUTOSIZE; ssk = uma_zalloc(sdp_zone, M_NOWAIT | M_ZERO); if (ssk == NULL) return (ENOBUFS); rw_init(&ssk->lock, "sdpsock"); ssk->socket = so; ssk->cred = crhold(so->so_cred); so->so_pcb = (caddr_t)ssk; sdp_init_sock(so); ssk->flags = 0; ssk->qp_active = 0; ssk->state = TCPS_CLOSED; mbufq_init(&ssk->rxctlq, INT_MAX); SDP_LIST_WLOCK(); LIST_INSERT_HEAD(&sdp_list, ssk, list); sdp_count++; SDP_LIST_WUNLOCK(); return (0); } /* * Detach SDP from the socket, potentially leaving it around for the * timewait to expire. */ static void sdp_detach(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); KASSERT(ssk->socket != NULL, ("sdp_detach: socket is NULL")); ssk->socket->so_pcb = NULL; ssk->socket = NULL; if (ssk->flags & (SDP_TIMEWAIT | SDP_DREQWAIT)) SDP_WUNLOCK(ssk); else if (ssk->flags & SDP_DROPPED || ssk->state < TCPS_SYN_SENT) sdp_pcbfree(ssk); else panic("sdp_detach: Unexpected state, ssk %p.\n", ssk); } /* * Allocate a local address for the socket. */ static int sdp_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { int error = 0; struct sdp_sock *ssk; struct sockaddr_in *sin; sin = (struct sockaddr_in *)nam; if (sin->sin_family != AF_INET) return (EAFNOSUPPORT); if (nam->sa_len != sizeof(*sin)) return (EINVAL); if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) return (EAFNOSUPPORT); ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = EINVAL; goto out; } error = sdp_pcbbind(ssk, nam, td->td_ucred); out: SDP_WUNLOCK(ssk); return (error); } /* * Prepare to accept connections. */ static int sdp_listen(struct socket *so, int backlog, struct thread *td) { int error = 0; struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = EINVAL; goto out; } if (error == 0 && ssk->lport == 0) error = sdp_pcbbind(ssk, (struct sockaddr *)0, td->td_ucred); SOCK_LOCK(so); if (error == 0) error = solisten_proto_check(so); if (error == 0) { solisten_proto(so, backlog); ssk->state = TCPS_LISTEN; } SOCK_UNLOCK(so); out: SDP_WUNLOCK(ssk); if (error == 0) error = -rdma_listen(ssk->id, backlog); return (error); } /* * Initiate a SDP connection to nam. */ static int sdp_start_connect(struct sdp_sock *ssk, struct sockaddr *nam, struct thread *td) { struct sockaddr_in src; struct socket *so; int error; so = ssk->socket; SDP_WLOCK_ASSERT(ssk); if (ssk->lport == 0) { error = sdp_pcbbind(ssk, (struct sockaddr *)0, td->td_ucred); if (error) return error; } src.sin_family = AF_INET; src.sin_len = sizeof(src); bzero(&src.sin_zero, sizeof(src.sin_zero)); src.sin_port = ssk->lport; src.sin_addr.s_addr = ssk->laddr; soisconnecting(so); SDP_WUNLOCK(ssk); error = -rdma_resolve_addr(ssk->id, (struct sockaddr *)&src, nam, SDP_RESOLVE_TIMEOUT); SDP_WLOCK(ssk); if (error == 0) ssk->state = TCPS_SYN_SENT; return 0; } /* * Initiate SDP connection. 
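For orientation, the attach/bind handlers above and sdp_connect() just below are reached through the ordinary sockets API. A hedged user-space sketch, assuming the SDP domain is exposed as AF_INET_SDP as in the in-tree OFED code; the constant, address, and port here are illustrative only:

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <string.h>

int
main(void)
{
        struct sockaddr_in sin;
        int fd;

        /* SDP stream socket; the addresses themselves stay AF_INET. */
        if ((fd = socket(AF_INET_SDP, SOCK_STREAM, 0)) == -1)
                err(1, "socket");
        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_len = sizeof(sin);
        sin.sin_port = htons(5000);
        if (inet_pton(AF_INET, "10.0.0.1", &sin.sin_addr) != 1)
                errx(1, "inet_pton");
        if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) == -1)
                err(1, "connect");      /* serviced by sdp_connect() below */
        return (0);
}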
*/ static int sdp_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { int error = 0; struct sdp_sock *ssk; struct sockaddr_in *sin; sin = (struct sockaddr_in *)nam; if (nam->sa_len != sizeof(*sin)) return (EINVAL); if (sin->sin_family != AF_INET) return (EAFNOSUPPORT); if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) return (EAFNOSUPPORT); if ((error = prison_remote_ip4(td->td_ucred, &sin->sin_addr)) != 0) return (error); ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) error = EINVAL; else error = sdp_start_connect(ssk, nam, td); SDP_WUNLOCK(ssk); return (error); } /* * Drop a SDP socket, reporting * the specified error. If connection is synchronized, * then send a RST to peer. */ static struct sdp_sock * sdp_drop(struct sdp_sock *ssk, int errno) { struct socket *so; SDP_WLOCK_ASSERT(ssk); so = ssk->socket; if (TCPS_HAVERCVDSYN(ssk->state)) sdp_output_reset(ssk); if (errno == ETIMEDOUT && ssk->softerror) errno = ssk->softerror; so->so_error = errno; return (sdp_closed(ssk)); } /* * User issued close, and wish to trail through shutdown states: * if never received SYN, just forget it. If got a SYN from peer, * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN. * If already got a FIN from peer, then almost done; go to LAST_ACK * state. In all other cases, have already sent FIN to peer (e.g. * after PRU_SHUTDOWN), and just have to play tedious game waiting * for peer to send FIN or not respond to keep-alives, etc. * We can let the user exit from the close as soon as the FIN is acked. */ static void sdp_usrclosed(struct sdp_sock *ssk) { SDP_WLOCK_ASSERT(ssk); switch (ssk->state) { case TCPS_LISTEN: ssk->state = TCPS_CLOSED; SDP_WUNLOCK(ssk); sdp_destroy_cma(ssk); SDP_WLOCK(ssk); /* FALLTHROUGH */ case TCPS_CLOSED: ssk = sdp_closed(ssk); /* * sdp_closed() should never return NULL here as the socket is * still open. */ KASSERT(ssk != NULL, ("sdp_usrclosed: sdp_closed() returned NULL")); break; case TCPS_SYN_SENT: /* FALLTHROUGH */ case TCPS_SYN_RECEIVED: ssk->flags |= SDP_NEEDFIN; break; case TCPS_ESTABLISHED: ssk->flags |= SDP_NEEDFIN; ssk->state = TCPS_FIN_WAIT_1; break; case TCPS_CLOSE_WAIT: ssk->state = TCPS_LAST_ACK; break; } if (ssk->state >= TCPS_FIN_WAIT_2) { /* Prevent the connection hanging in FIN_WAIT_2 forever. */ if (ssk->state == TCPS_FIN_WAIT_2) sdp_2msl_wait(ssk); else soisdisconnected(ssk->socket); } } static void sdp_output_disconnect(struct sdp_sock *ssk) { SDP_WLOCK_ASSERT(ssk); callout_reset(&ssk->keep2msl, SDP_FIN_WAIT_TIMEOUT, sdp_dreq_timeout, ssk); ssk->flags |= SDP_NEEDFIN | SDP_DREQWAIT; sdp_post_sends(ssk, M_NOWAIT); } /* * Initiate or continue a disconnect. * If embryonic state, just send reset (once). * If in ``let data drain'' option and linger null, just drop. * Otherwise (hard), mark socket disconnecting and drop * current input data; switch states based on user close, and * send segment to peer (with FIN). */ static void sdp_start_disconnect(struct sdp_sock *ssk) { struct socket *so; int unread; so = ssk->socket; SDP_WLOCK_ASSERT(ssk); sdp_stop_keepalive_timer(so); /* * Neither sdp_closed() nor sdp_drop() should return NULL, as the * socket is still open. 
*/ if (ssk->state < TCPS_ESTABLISHED) { ssk = sdp_closed(ssk); KASSERT(ssk != NULL, ("sdp_start_disconnect: sdp_close() returned NULL")); } else if ((so->so_options & SO_LINGER) && so->so_linger == 0) { ssk = sdp_drop(ssk, 0); KASSERT(ssk != NULL, ("sdp_start_disconnect: sdp_drop() returned NULL")); } else { soisdisconnecting(so); unread = sbused(&so->so_rcv); sbflush(&so->so_rcv); sdp_usrclosed(ssk); if (!(ssk->flags & SDP_DROPPED)) { if (unread) sdp_output_reset(ssk); else sdp_output_disconnect(ssk); } } } /* * User initiated disconnect. */ static int sdp_disconnect(struct socket *so) { struct sdp_sock *ssk; int error = 0; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = ECONNRESET; goto out; } sdp_start_disconnect(ssk); out: SDP_WUNLOCK(ssk); return (error); } /* * Accept a connection. Essentially all the work is done at higher levels; * just return the address of the peer, storing through addr. * * * XXX This is broken XXX * * The rationale for acquiring the sdp lock here is somewhat complicated, * and is described in detail in the commit log entry for r175612. Acquiring * it delays an accept(2) racing with sonewconn(), which inserts the socket * before the address/port fields are initialized. A better fix would * prevent the socket from being placed in the listen queue until all fields * are fully initialized. */ static int sdp_accept(struct socket *so, struct sockaddr *sa) { struct sdp_sock *ssk = NULL; int error; if (so->so_state & SS_ISDISCONNECTED) return (ECONNABORTED); error = 0; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) error = ECONNABORTED; else *(struct sockaddr_in *)sa = (struct sockaddr_in ){ .sin_family = AF_INET, .sin_len = sizeof(struct sockaddr_in), .sin_addr.s_addr = ssk->faddr, .sin_port = ssk->fport, }; SDP_WUNLOCK(ssk); return (error); } /* * Mark the connection as being incapable of further output. */ static int sdp_shutdown(struct socket *so) { int error = 0; struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = ECONNRESET; goto out; } socantsendmore(so); sdp_usrclosed(ssk); if (!(ssk->flags & SDP_DROPPED)) sdp_output_disconnect(ssk); out: SDP_WUNLOCK(ssk); return (error); } static void sdp_append(struct sdp_sock *ssk, struct sockbuf *sb, struct mbuf *mb, int cnt) { struct mbuf *n; int ncnt; SOCKBUF_LOCK_ASSERT(sb); SBLASTRECORDCHK(sb); KASSERT(mb->m_flags & M_PKTHDR, ("sdp_append: %p Missing packet header.\n", mb)); n = sb->sb_lastrecord; /* * If the queue is empty just set all pointers and proceed. */ if (n == NULL) { sb->sb_lastrecord = sb->sb_mb = sb->sb_sndptr = mb; for (; mb; mb = mb->m_next) { sb->sb_mbtail = mb; sballoc(sb, mb); } return; } /* * Count the number of mbufs in the current tail. */ for (ncnt = 0; n->m_next; n = n->m_next) ncnt++; n = sb->sb_lastrecord; /* * If the two chains can fit in a single sdp packet and * the last record has not been sent yet (WRITABLE) coalesce * them. The lastrecord remains the same but we must strip the * packet header and then let sbcompress do the hard part. */ if (M_WRITABLE(n) && ncnt + cnt < SDP_MAX_SEND_SGES && n->m_pkthdr.len + mb->m_pkthdr.len - SDP_HEAD_SIZE < ssk->xmit_size_goal) { m_adj(mb, SDP_HEAD_SIZE); n->m_pkthdr.len += mb->m_pkthdr.len; n->m_flags |= mb->m_flags & (M_PUSH | M_URG); m_demote(mb, 1, 0); sbcompress(sb, mb, sb->sb_mbtail); return; } /* * Not compressible, just append to the end and adjust counters. 
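The coalescing test above can be read as a stand-alone predicate; the restatement below is illustrative only (sdp_append() keeps the open-coded form) and relies on the includes already present in this file ("sdp.h" and the mbuf headers):

/*
 * Two chains are merged into one SDP packet only when the tail record is
 * still writable, the combined scatter/gather count stays under
 * SDP_MAX_SEND_SGES, and the merged payload (minus the one SDP header
 * that is stripped) still fits the negotiated xmit_size_goal.
 */
static inline bool
sdp_can_coalesce(struct sdp_sock *ssk, struct mbuf *tail, struct mbuf *mb,
    int tail_cnt, int cnt)
{
        return (M_WRITABLE(tail) &&
            tail_cnt + cnt < SDP_MAX_SEND_SGES &&
            tail->m_pkthdr.len + mb->m_pkthdr.len - SDP_HEAD_SIZE <
            ssk->xmit_size_goal);
}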
*/ sb->sb_lastrecord->m_flags |= M_PUSH; sb->sb_lastrecord->m_nextpkt = mb; sb->sb_lastrecord = mb; if (sb->sb_sndptr == NULL) sb->sb_sndptr = mb; for (; mb; mb = mb->m_next) { sb->sb_mbtail = mb; sballoc(sb, mb); } } /* * Do a send by putting data in output queue and updating urgent * marker if URG set. Possibly send more data. Unlike the other * pru_*() routines, the mbuf chains are our responsibility. We * must either enqueue them or free them. The other pru_* routines * generally are caller-frees. * * This comes from sendfile, normal sends will come from sdp_sosend(). */ static int sdp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { struct sdp_sock *ssk; struct mbuf *n; int error; int cnt; if (nam != NULL) { if (nam->sa_family != AF_INET) { if (control) m_freem(control); m_freem(m); return (EAFNOSUPPORT); } if (nam->sa_len != sizeof(struct sockaddr_in)) { if (control) m_freem(control); m_freem(m); return (EINVAL); } } error = 0; ssk = sdp_sk(so); KASSERT(m->m_flags & M_PKTHDR, ("sdp_send: %p no packet header", m)); M_PREPEND(m, SDP_HEAD_SIZE, M_WAITOK); mtod(m, struct sdp_bsdh *)->mid = SDP_MID_DATA; for (n = m, cnt = 0; n->m_next; n = n->m_next) cnt++; if (cnt > SDP_MAX_SEND_SGES) { n = m_collapse(m, M_WAITOK, SDP_MAX_SEND_SGES); if (n == NULL) { m_freem(m); return (EMSGSIZE); } m = n; for (cnt = 0; n->m_next; n = n->m_next) cnt++; } SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { if (control) m_freem(control); if (m) m_freem(m); error = ECONNRESET; goto out; } if (control) { /* SDP doesn't support control messages. */ if (control->m_len) { m_freem(control); if (m) m_freem(m); error = EINVAL; goto out; } m_freem(control); /* empty control, just free it */ } if (!(flags & PRUS_OOB)) { SOCKBUF_LOCK(&so->so_snd); sdp_append(ssk, &so->so_snd, m, cnt); SOCKBUF_UNLOCK(&so->so_snd); if (nam && ssk->state < TCPS_SYN_SENT) { /* * Do implied connect if not yet connected. */ error = sdp_start_connect(ssk, nam, td); if (error) goto out; } if (flags & PRUS_EOF) { /* * Close the send side of the connection after * the data is sent. */ socantsendmore(so); sdp_usrclosed(ssk); if (!(ssk->flags & SDP_DROPPED)) sdp_output_disconnect(ssk); } else if (!(ssk->flags & SDP_DROPPED) && !(flags & PRUS_MORETOCOME)) sdp_post_sends(ssk, M_NOWAIT); SDP_WUNLOCK(ssk); return (0); } else { SOCKBUF_LOCK(&so->so_snd); if (sbspace(&so->so_snd) < -512) { SOCKBUF_UNLOCK(&so->so_snd); m_freem(m); error = ENOBUFS; goto out; } /* * According to RFC961 (Assigned Protocols), * the urgent pointer points to the last octet * of urgent data. We continue, however, * to consider it to indicate the first octet * of data past the urgent section. * Otherwise, snd_up should be one lower. */ m->m_flags |= M_URG | M_PUSH; sdp_append(ssk, &so->so_snd, m, cnt); SOCKBUF_UNLOCK(&so->so_snd); if (nam && ssk->state < TCPS_SYN_SENT) { /* * Do implied connect if not yet connected. */ error = sdp_start_connect(ssk, nam, td); if (error) goto out; } sdp_post_sends(ssk, M_NOWAIT); SDP_WUNLOCK(ssk); return (0); } out: SDP_WUNLOCK(ssk); return (error); } /* * Send on a socket. If send must go all at once and message is larger than * send buffering, then hard error. Lock against other senders. If must go * all at once and not enough room now, then inform user that this would * block and do nothing. Otherwise, if nonblocking, send as much as * possible. 
The data to be sent is described by "uio" if nonzero, otherwise * by the mbuf chain "top" (which must be null if uio is not). Data provided * in mbuf chain must be small enough to send all at once. * * Returns nonzero on error, timeout or signal; callers must check for short * counts if EINTR/ERESTART are returned. Data and control buffers are freed * on return. */ static int sdp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { struct sdp_sock *ssk; long space, resid; int atomic; int error; int copy; if (uio != NULL) resid = uio->uio_resid; else resid = top->m_pkthdr.len; atomic = top != NULL; if (control != NULL) { if (control->m_len) { m_freem(control); if (top) m_freem(top); return (EINVAL); } m_freem(control); control = NULL; } /* * In theory resid should be unsigned. However, space must be * signed, as it might be less than 0 if we over-committed, and we * must use a signed comparison of space and resid. On the other * hand, a negative resid causes us to loop sending 0-length * segments to the protocol. * * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM * type sockets since that's an error. */ if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { error = EINVAL; goto out; } if (td != NULL) td->td_ru.ru_msgsnd++; ssk = sdp_sk(so); error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags)); if (error) goto out; restart: do { SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { SOCKBUF_UNLOCK(&so->so_snd); error = EPIPE; goto release; } if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_snd); goto release; } if ((so->so_state & SS_ISCONNECTED) == 0 && addr == NULL) { SOCKBUF_UNLOCK(&so->so_snd); error = ENOTCONN; goto release; } space = sbspace(&so->so_snd); if (flags & MSG_OOB) space += 1024; if (atomic && resid > ssk->xmit_size_goal - SDP_HEAD_SIZE) { SOCKBUF_UNLOCK(&so->so_snd); error = EMSGSIZE; goto release; } if (space < resid && (atomic || space < so->so_snd.sb_lowat)) { if ((so->so_state & SS_NBIO) || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) { SOCKBUF_UNLOCK(&so->so_snd); error = EWOULDBLOCK; goto release; } error = sbwait(so, SO_SND); SOCKBUF_UNLOCK(&so->so_snd); if (error) goto release; goto restart; } SOCKBUF_UNLOCK(&so->so_snd); do { if (uio == NULL) { resid = 0; if (flags & MSG_EOR) top->m_flags |= M_EOR; } else { /* * Copy the data from userland into a mbuf * chain. If no data is to be copied in, * a single empty mbuf is returned. */ copy = min(space, ssk->xmit_size_goal - SDP_HEAD_SIZE); top = m_uiotombuf(uio, M_WAITOK, copy, 0, M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)); if (top == NULL) { /* only possible error */ error = EFAULT; goto release; } space -= resid - uio->uio_resid; resid = uio->uio_resid; } /* * XXX all the SBS_CANTSENDMORE checks previously * done could be out of date after dropping the * socket lock. */ error = sdp_send(so, (flags & MSG_OOB) ? PRUS_OOB : /* * Set EOF on the last send if the user specified * MSG_EOF. */ ((flags & MSG_EOF) && (resid <= 0)) ? PRUS_EOF : /* If there is more to send set PRUS_MORETOCOME. */ (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0, top, addr, NULL, td); top = NULL; if (error) goto release; } while (resid && space > 0); } while (resid); release: SOCK_IO_SEND_UNLOCK(so); out: if (top != NULL) m_freem(top); return (error); } /* * The part of soreceive() that implements reading non-inline out-of-band * data from a socket. 
For more complete comments, see soreceive(), from * which this code originated. * * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is * unable to return an mbuf chain to the caller. */ static int soreceive_rcvoob(struct socket *so, struct uio *uio, int flags) { struct protosw *pr = so->so_proto; struct mbuf *m; int error; KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0")); m = m_get(M_WAITOK, MT_DATA); error = pr->pr_rcvoob(so, m, flags & MSG_PEEK); if (error) goto bad; do { error = uiomove(mtod(m, void *), (int) min(uio->uio_resid, m->m_len), uio); m = m_free(m); } while (uio->uio_resid && error == 0 && m); bad: if (m != NULL) m_freem(m); return (error); } /* * Optimized version of soreceive() for stream (TCP) sockets. */ static int sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { int len = 0, error = 0, flags, oresid; struct sockbuf *sb; struct mbuf *m, *n = NULL; struct sdp_sock *ssk; /* We only do stream sockets. */ if (so->so_type != SOCK_STREAM) return (EINVAL); if (psa != NULL) *psa = NULL; if (controlp != NULL) return (EINVAL); if (flagsp != NULL) flags = *flagsp &~ MSG_EOR; else flags = 0; if (flags & MSG_OOB) return (soreceive_rcvoob(so, uio, flags)); if (mp0 != NULL) *mp0 = NULL; sb = &so->so_rcv; ssk = sdp_sk(so); /* Prevent other readers from entering the socket. */ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); if (error) return (error); SOCKBUF_LOCK(sb); /* Easy one, no space to copyout anything. */ if (uio->uio_resid == 0) { error = EINVAL; goto out; } oresid = uio->uio_resid; /* We will never ever get anything unless we are connected. */ if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { /* When disconnecting there may be still some data left. */ if (sbavail(sb)) goto deliver; if (!(so->so_state & SS_ISDISCONNECTED)) error = ENOTCONN; goto out; } /* Socket buffer is empty and we shall not block. */ if (sbavail(sb) == 0 && ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) { error = EAGAIN; goto out; } restart: SOCKBUF_LOCK_ASSERT(&so->so_rcv); /* Abort if socket has reported problems. */ if (so->so_error) { if (sbavail(sb)) goto deliver; if (oresid > uio->uio_resid) goto out; error = so->so_error; if (!(flags & MSG_PEEK)) so->so_error = 0; goto out; } /* Door is closed. Deliver what is left, if any. */ if (sb->sb_state & SBS_CANTRCVMORE) { if (sbavail(sb)) goto deliver; else goto out; } /* Socket buffer got some data that we shall deliver now. */ if (sbavail(sb) && !(flags & MSG_WAITALL) && ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)) || sbavail(sb) >= sb->sb_lowat || sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat) ) { goto deliver; } /* On MSG_WAITALL we must wait until all data or error arrives. */ if ((flags & MSG_WAITALL) && (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_lowat)) goto deliver; /* * Wait and block until (more) data comes in. * NB: Drops the sockbuf lock during wait. */ error = sbwait(so, SO_RCV); if (error) goto out; goto restart; deliver: SOCKBUF_LOCK_ASSERT(&so->so_rcv); KASSERT(sbavail(sb), ("%s: sockbuf empty", __func__)); KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__)); /* Statistics. */ if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; /* Fill uio until full or current end of socket buffer is reached. */ len = min(uio->uio_resid, sbavail(sb)); if (mp0 != NULL) { /* Dequeue as many mbufs as possible. 
*/ if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) { for (*mp0 = m = sb->sb_mb; m != NULL && m->m_len <= len; m = m->m_next) { len -= m->m_len; uio->uio_resid -= m->m_len; sbfree(sb, m); n = m; } sb->sb_mb = m; if (sb->sb_mb == NULL) SB_EMPTY_FIXUP(sb); n->m_next = NULL; } /* Copy the remainder. */ if (len > 0) { KASSERT(sb->sb_mb != NULL, ("%s: len > 0 && sb->sb_mb empty", __func__)); m = m_copym(sb->sb_mb, 0, len, M_NOWAIT); if (m == NULL) len = 0; /* Don't flush data from sockbuf. */ else uio->uio_resid -= m->m_len; if (*mp0 != NULL) n->m_next = m; else *mp0 = m; if (*mp0 == NULL) { error = ENOBUFS; goto out; } } } else { /* NB: Must unlock socket buffer as uiomove may sleep. */ SOCKBUF_UNLOCK(sb); error = m_mbuftouio(uio, sb->sb_mb, len); SOCKBUF_LOCK(sb); if (error) goto out; } SBLASTRECORDCHK(sb); SBLASTMBUFCHK(sb); /* * Remove the delivered data from the socket buffer unless we * were only peeking. */ if (!(flags & MSG_PEEK)) { if (len > 0) sbdrop_locked(sb, len); /* Notify protocol that we drained some data. */ SOCKBUF_UNLOCK(sb); SDP_WLOCK(ssk); sdp_do_posts(ssk); SDP_WUNLOCK(ssk); SOCKBUF_LOCK(sb); } /* * For MSG_WAITALL we may have to loop again and wait for * more data to come in. */ if ((flags & MSG_WAITALL) && uio->uio_resid > 0) goto restart; out: SBLASTRECORDCHK(sb); SBLASTMBUFCHK(sb); SOCKBUF_UNLOCK(sb); SOCK_IO_RECV_UNLOCK(so); return (error); } /* * Abort is used to teardown a connection typically while sitting in * the accept queue. */ void sdp_abort(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); /* * If we have not yet dropped, do it now. */ if (!(ssk->flags & SDP_TIMEWAIT) && !(ssk->flags & SDP_DROPPED)) sdp_drop(ssk, ECONNABORTED); KASSERT(ssk->flags & SDP_DROPPED, ("sdp_abort: %p not dropped 0x%X", ssk, ssk->flags)); SDP_WUNLOCK(ssk); } /* * Close a SDP socket and initiate a friendly disconnect. */ static void sdp_close(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); /* * If we have not yet dropped, do it now. */ if (!(ssk->flags & SDP_TIMEWAIT) && !(ssk->flags & SDP_DROPPED)) sdp_start_disconnect(ssk); /* * If we've still not dropped let the socket layer know we're * holding on to the socket and pcb for a while. */ if (!(ssk->flags & SDP_DROPPED)) { ssk->flags |= SDP_SOCKREF; soref(so); } SDP_WUNLOCK(ssk); } /* * User requests out-of-band data. 
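 *
 * This backs recv(2) with MSG_OOB on an SDP socket: the single urgent byte
 * saved in ssk->iobc by sdp_urg() is copied into the caller's mbuf.  A
 * minimal userland sketch (illustrative only; "s" and handle_urgent_byte()
 * are placeholders):
 *
 *	char c;
 *
 *	if (recv(s, &c, 1, MSG_OOB) == 1)
 *		handle_urgent_byte(c);
 *
 * EINVAL is returned when the urgent data is delivered inline (SO_OOBINLINE)
 * or has already been consumed, and EWOULDBLOCK when no urgent byte has
 * arrived yet.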
*/ static int sdp_rcvoob(struct socket *so, struct mbuf *m, int flags) { int error = 0; struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (!rx_ring_trylock(&ssk->rx_ring)) { SDP_WUNLOCK(ssk); return (ECONNRESET); } if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = ECONNRESET; goto out; } if ((so->so_oobmark == 0 && (so->so_rcv.sb_state & SBS_RCVATMARK) == 0) || so->so_options & SO_OOBINLINE || ssk->oobflags & SDP_HADOOB) { error = EINVAL; goto out; } if ((ssk->oobflags & SDP_HAVEOOB) == 0) { error = EWOULDBLOCK; goto out; } m->m_len = 1; *mtod(m, caddr_t) = ssk->iobc; if ((flags & MSG_PEEK) == 0) ssk->oobflags ^= (SDP_HAVEOOB | SDP_HADOOB); out: rx_ring_unlock(&ssk->rx_ring); SDP_WUNLOCK(ssk); return (error); } void sdp_urg(struct sdp_sock *ssk, struct mbuf *mb) { struct mbuf *m; struct socket *so; so = ssk->socket; if (so == NULL) return; so->so_oobmark = sbused(&so->so_rcv) + mb->m_pkthdr.len - 1; sohasoutofband(so); ssk->oobflags &= ~(SDP_HAVEOOB | SDP_HADOOB); if (!(so->so_options & SO_OOBINLINE)) { for (m = mb; m->m_next != NULL; m = m->m_next); ssk->iobc = *(mtod(m, char *) + m->m_len - 1); ssk->oobflags |= SDP_HAVEOOB; m->m_len--; mb->m_pkthdr.len--; } } /* * Notify a sdp socket of an asynchronous error. * * Do not wake up user since there currently is no mechanism for * reporting soft errors (yet - a kqueue filter may be added). */ struct sdp_sock * sdp_notify(struct sdp_sock *ssk, int error) { SDP_WLOCK_ASSERT(ssk); if ((ssk->flags & SDP_TIMEWAIT) || (ssk->flags & SDP_DROPPED)) return (ssk); /* * Ignore some errors if we are hooked up. */ if (ssk->state == TCPS_ESTABLISHED && (error == EHOSTUNREACH || error == ENETUNREACH || error == EHOSTDOWN)) return (ssk); ssk->softerror = error; return sdp_drop(ssk, error); } static void sdp_keepalive_timeout(void *data) { struct sdp_sock *ssk; ssk = data; /* Callout canceled. */ if (!callout_active(&ssk->keep2msl)) return; /* Callout rescheduled as a different kind of timer. */ if (callout_pending(&ssk->keep2msl)) goto out; callout_deactivate(&ssk->keep2msl); if (ssk->flags & SDP_DROPPED || (ssk->socket->so_options & SO_KEEPALIVE) == 0) goto out; sdp_post_keepalive(ssk); callout_reset(&ssk->keep2msl, SDP_KEEPALIVE_TIME, sdp_keepalive_timeout, ssk); out: SDP_WUNLOCK(ssk); } void sdp_start_keepalive_timer(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); if (!callout_pending(&ssk->keep2msl)) callout_reset(&ssk->keep2msl, SDP_KEEPALIVE_TIME, sdp_keepalive_timeout, ssk); } static void sdp_stop_keepalive_timer(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); callout_stop(&ssk->keep2msl); } /* * sdp_ctloutput() must drop the inpcb lock before performing copyin on * socket option arguments. When it re-acquires the lock after the copy, it * has to revalidate that the connection is still valid for the socket * option. 
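 *
 * The TCP_NODELAY handler below is the canonical instance of that pattern
 * (sketch of the sequence, taken from the SOPT_SET case):
 *
 *	SDP_WUNLOCK(ssk);
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 *	SDP_WLOCK_RECHECK(ssk);
 *
 * SDP_WLOCK_RECHECK() returns ECONNRESET if the connection was dropped while
 * the lock was released; any option added later should follow the same
 * sequence.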
*/ #define SDP_WLOCK_RECHECK(inp) do { \ SDP_WLOCK(ssk); \ if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { \ SDP_WUNLOCK(ssk); \ return (ECONNRESET); \ } \ } while(0) static int sdp_ctloutput(struct socket *so, struct sockopt *sopt) { int error, opt, optval; struct sdp_sock *ssk; error = 0; ssk = sdp_sk(so); if (sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_KEEPALIVE) { SDP_WLOCK(ssk); if (so->so_options & SO_KEEPALIVE) sdp_start_keepalive_timer(so); else sdp_stop_keepalive_timer(so); SDP_WUNLOCK(ssk); } if (sopt->sopt_level != IPPROTO_TCP) return (error); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { SDP_WUNLOCK(ssk); return (ECONNRESET); } switch (sopt->sopt_dir) { case SOPT_SET: switch (sopt->sopt_name) { case TCP_NODELAY: SDP_WUNLOCK(ssk); error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) return (error); SDP_WLOCK_RECHECK(ssk); opt = SDP_NODELAY; if (optval) ssk->flags |= opt; else ssk->flags &= ~opt; sdp_do_posts(ssk); SDP_WUNLOCK(ssk); break; default: SDP_WUNLOCK(ssk); error = ENOPROTOOPT; break; } break; case SOPT_GET: switch (sopt->sopt_name) { case TCP_NODELAY: optval = ssk->flags & SDP_NODELAY; SDP_WUNLOCK(ssk); error = sooptcopyout(sopt, &optval, sizeof optval); break; default: SDP_WUNLOCK(ssk); error = ENOPROTOOPT; break; } break; } return (error); } #undef SDP_WLOCK_RECHECK int sdp_mod_count = 0; int sdp_mod_usec = 0; void sdp_set_default_moderation(struct sdp_sock *ssk) { if (sdp_mod_count <= 0 || sdp_mod_usec <= 0) return; ib_modify_cq(ssk->rx_ring.cq, sdp_mod_count, sdp_mod_usec); } static void sdp_dev_add(struct ib_device *device) { struct ib_fmr_pool_param param; struct sdp_device *sdp_dev; sdp_dev = malloc(sizeof(*sdp_dev), M_SDP, M_WAITOK | M_ZERO); sdp_dev->pd = ib_alloc_pd(device, 0); if (IS_ERR(sdp_dev->pd)) goto out_pd; memset(&param, 0, sizeof param); param.max_pages_per_fmr = SDP_FMR_SIZE; param.page_shift = PAGE_SHIFT; param.access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ); param.pool_size = SDP_FMR_POOL_SIZE; param.dirty_watermark = SDP_FMR_DIRTY_SIZE; param.cache = 1; sdp_dev->fmr_pool = ib_create_fmr_pool(sdp_dev->pd, &param); if (IS_ERR(sdp_dev->fmr_pool)) goto out_fmr; ib_set_client_data(device, &sdp_client, sdp_dev); return; out_fmr: ib_dealloc_pd(sdp_dev->pd); out_pd: free(sdp_dev, M_SDP); } static void sdp_dev_rem(struct ib_device *device, void *client_data) { struct sdp_device *sdp_dev; struct sdp_sock *ssk; SDP_LIST_WLOCK(); LIST_FOREACH(ssk, &sdp_list, list) { if (ssk->ib_device != device) continue; SDP_WLOCK(ssk); if ((ssk->flags & SDP_DESTROY) == 0) ssk = sdp_notify(ssk, ECONNRESET); if (ssk) SDP_WUNLOCK(ssk); } SDP_LIST_WUNLOCK(); /* * XXX Do I need to wait between these two? */ sdp_dev = ib_get_client_data(device, &sdp_client); if (!sdp_dev) return; ib_flush_fmr_pool(sdp_dev->fmr_pool); ib_destroy_fmr_pool(sdp_dev->fmr_pool); ib_dealloc_pd(sdp_dev->pd); free(sdp_dev, M_SDP); } struct ib_client sdp_client = { .name = "sdp", .add = sdp_dev_add, .remove = sdp_dev_rem }; static int sdp_pcblist(SYSCTL_HANDLER_ARGS) { int error, n, i; struct sdp_sock *ssk; struct xinpgen xig; /* * The process of preparing the TCB list is too time-consuming and * resource-intensive to repeat twice on every request. */ if (req->oldptr == NULL) { n = sdp_count; n += imax(n / 8, 10); req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb); return (0); } if (req->newptr != NULL) return (EPERM); /* * OK, now we're committed to doing something.
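 * A userland consumer is expected to make two passes over this handler:
 * first a size probe with a NULL old pointer (served by the oldptr == NULL
 * branch above), then the real request with an allocated buffer.  A rough
 * sketch of such a consumer (hypothetical, error handling omitted):
 *
 *	size_t len = 0;
 *	char *buf;
 *
 *	sysctlbyname("net.inet.sdp.pcblist", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("net.inet.sdp.pcblist", buf, &len, NULL, 0);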
*/ SDP_LIST_RLOCK(); n = sdp_count; SDP_LIST_RUNLOCK(); error = sysctl_wire_old_buffer(req, 2 * (sizeof xig) + n * sizeof(struct xtcpcb)); if (error != 0) return (error); bzero(&xig, sizeof(xig)); xig.xig_len = sizeof xig; xig.xig_count = n; xig.xig_gen = 0; xig.xig_sogen = so_gencnt; error = SYSCTL_OUT(req, &xig, sizeof xig); if (error) return (error); SDP_LIST_RLOCK(); for (ssk = LIST_FIRST(&sdp_list), i = 0; ssk != NULL && i < n; ssk = LIST_NEXT(ssk, list)) { struct xtcpcb xt; SDP_RLOCK(ssk); if (ssk->flags & SDP_TIMEWAIT) { if (ssk->cred != NULL) error = cr_cansee(req->td->td_ucred, ssk->cred); else error = EINVAL; /* Skip this inp. */ } else if (ssk->socket) error = cr_canseesocket(req->td->td_ucred, ssk->socket); else error = EINVAL; if (error) { error = 0; goto next; } bzero(&xt, sizeof(xt)); xt.xt_len = sizeof xt; xt.xt_inp.inp_gencnt = 0; xt.xt_inp.inp_vflag = INP_IPV4; memcpy(&xt.xt_inp.inp_laddr, &ssk->laddr, sizeof(ssk->laddr)); xt.xt_inp.inp_lport = ssk->lport; memcpy(&xt.xt_inp.inp_faddr, &ssk->faddr, sizeof(ssk->faddr)); xt.xt_inp.inp_fport = ssk->fport; xt.t_state = ssk->state; if (ssk->socket != NULL) sotoxsocket(ssk->socket, &xt.xt_inp.xi_socket); xt.xt_inp.xi_socket.xso_protocol = IPPROTO_TCP; SDP_RUNLOCK(ssk); error = SYSCTL_OUT(req, &xt, sizeof xt); if (error) break; i++; continue; next: SDP_RUNLOCK(ssk); } if (!error) { /* * Give the user an updated idea of our state. * If the generation differs from what we told * her before, she knows that something happened * while we were processing this request, and it * might be necessary to retry. */ xig.xig_gen = 0; xig.xig_sogen = so_gencnt; xig.xig_count = sdp_count; error = SYSCTL_OUT(req, &xig, sizeof xig); } SDP_LIST_RUNLOCK(); return (error); } SYSCTL_NODE(_net_inet, -1, sdp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "SDP"); SYSCTL_PROC(_net_inet_sdp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0, sdp_pcblist, "S,xtcpcb", "List of active SDP connections"); static void sdp_zone_change(void *tag) { uma_zone_set_max(sdp_zone, maxsockets); } static void sdp_init(void *arg __unused) { LIST_INIT(&sdp_list); sdp_zone = uma_zcreate("sdp_sock", sizeof(struct sdp_sock), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); uma_zone_set_max(sdp_zone, maxsockets); EVENTHANDLER_REGISTER(maxsockets_change, sdp_zone_change, NULL, EVENTHANDLER_PRI_ANY); rx_comp_wq = create_singlethread_workqueue("rx_comp_wq"); ib_register_client(&sdp_client); } SYSINIT(sdp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, sdp_init, NULL); #define SDP_PROTOSW \ .pr_type = SOCK_STREAM, \ .pr_flags = PR_CONNREQUIRED|PR_IMPLOPCL|PR_WANTRCVD,\ .pr_ctloutput = sdp_ctloutput, \ .pr_abort = sdp_abort, \ .pr_accept = sdp_accept, \ .pr_attach = sdp_attach, \ .pr_bind = sdp_bind, \ .pr_connect = sdp_connect, \ .pr_detach = sdp_detach, \ .pr_disconnect = sdp_disconnect, \ .pr_listen = sdp_listen, \ .pr_peeraddr = sdp_getpeeraddr, \ .pr_rcvoob = sdp_rcvoob, \ .pr_send = sdp_send, \ .pr_sosend = sdp_sosend, \ .pr_soreceive = sdp_sorecv, \ .pr_shutdown = sdp_shutdown, \ .pr_sockaddr = sdp_getsockaddr, \ .pr_close = sdp_close static struct protosw sdp_ip_protosw = { .pr_protocol = IPPROTO_IP, SDP_PROTOSW }; static struct protosw sdp_tcp_protosw = { .pr_protocol = IPPROTO_TCP, SDP_PROTOSW }; static struct domain sdpdomain = { .dom_family = AF_INET_SDP, .dom_name = "SDP", .dom_nprotosw = 2, .dom_protosw = { &sdp_ip_protosw, &sdp_tcp_protosw, }, }; DOMAIN_SET(sdp); int sdp_debug_level = 1; int sdp_data_debug_level = 0; diff --git a/sys/rpc/rpc_generic.c 
b/sys/rpc/rpc_generic.c index 4ec06197d57c..0d6c0f1b7c7d 100644 --- a/sys/rpc/rpc_generic.c +++ b/sys/rpc/rpc_generic.c @@ -1,987 +1,974 @@ /* $NetBSD: rpc_generic.c,v 1.4 2000/09/28 09:07:04 kleink Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1986-1991 by Sun Microsystems Inc. */ #include /* * rpc_generic.c, Miscl routines for RPC. * */ #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern u_long sb_max_adj; /* not defined in socketvar.h */ /* Provide an entry point hook for the rpcsec_gss module. */ struct rpc_gss_entries rpc_gss_entries; struct handle { NCONF_HANDLE *nhandle; int nflag; /* Whether NETPATH or NETCONFIG */ int nettype; }; static const struct _rpcnettype { const char *name; const int type; } _rpctypelist[] = { { "netpath", _RPC_NETPATH }, { "visible", _RPC_VISIBLE }, { "circuit_v", _RPC_CIRCUIT_V }, { "datagram_v", _RPC_DATAGRAM_V }, { "circuit_n", _RPC_CIRCUIT_N }, { "datagram_n", _RPC_DATAGRAM_N }, { "tcp", _RPC_TCP }, { "udp", _RPC_UDP }, { 0, _RPC_NONE } }; struct netid_af { const char *netid; int af; int protocol; }; static const struct netid_af na_cvt[] = { { "udp", AF_INET, IPPROTO_UDP }, { "tcp", AF_INET, IPPROTO_TCP }, #ifdef INET6 { "udp6", AF_INET6, IPPROTO_UDP }, { "tcp6", AF_INET6, IPPROTO_TCP }, #endif { "local", AF_LOCAL, 0 } }; struct rpc_createerr rpc_createerr; /* * Find the appropriate buffer size */ u_int /*ARGSUSED*/ __rpc_get_t_size(int af, int proto, int size) { int defsize; switch (proto) { case IPPROTO_TCP: defsize = 64 * 1024; /* XXX */ break; case IPPROTO_UDP: defsize = UDPMSGSIZE; break; default: defsize = RPC_MAXDATASIZE; break; } if (size == 0) return defsize; /* Check whether the value is within the upper max limit */ return (size > sb_max_adj ? 
(u_int)sb_max_adj : (u_int)size); } /* * Find the appropriate address buffer size */ u_int __rpc_get_a_size(int af) { switch (af) { case AF_INET: return sizeof (struct sockaddr_in); #ifdef INET6 case AF_INET6: return sizeof (struct sockaddr_in6); #endif case AF_LOCAL: return sizeof (struct sockaddr_un); default: break; } return ((u_int)RPC_MAXADDRSIZE); } #if 0 /* * Used to ping the NULL procedure for clnt handle. * Returns NULL if fails, else a non-NULL pointer. */ void * rpc_nullproc(clnt) CLIENT *clnt; { struct timeval TIMEOUT = {25, 0}; if (clnt_call(clnt, NULLPROC, (xdrproc_t) xdr_void, NULL, (xdrproc_t) xdr_void, NULL, TIMEOUT) != RPC_SUCCESS) { return (NULL); } return ((void *) clnt); } #endif int __rpc_socket2sockinfo(struct socket *so, struct __rpc_sockinfo *sip) { int type, proto; - struct sockaddr *sa; + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; sa_family_t family; struct sockopt opt; int error; - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_sockaddr(so, &sa); - CURVNET_RESTORE(); + error = sosockaddr(so, (struct sockaddr *)&ss); if (error) return 0; - sip->si_alen = sa->sa_len; - family = sa->sa_family; - free(sa, M_SONAME); + sip->si_alen = ss.ss_len; + family = ss.ss_family; opt.sopt_dir = SOPT_GET; opt.sopt_level = SOL_SOCKET; opt.sopt_name = SO_TYPE; opt.sopt_val = &type; opt.sopt_valsize = sizeof type; opt.sopt_td = NULL; error = sogetopt(so, &opt); if (error) return 0; /* XXX */ if (family != AF_LOCAL) { if (type == SOCK_STREAM) proto = IPPROTO_TCP; else if (type == SOCK_DGRAM) proto = IPPROTO_UDP; else return 0; } else proto = 0; sip->si_af = family; sip->si_proto = proto; sip->si_socktype = type; return 1; } /* * Linear search, but the number of entries is small. */ int __rpc_nconf2sockinfo(const struct netconfig *nconf, struct __rpc_sockinfo *sip) { int i; for (i = 0; i < (sizeof na_cvt) / (sizeof (struct netid_af)); i++) if (strcmp(na_cvt[i].netid, nconf->nc_netid) == 0 || ( strcmp(nconf->nc_netid, "unix") == 0 && strcmp(na_cvt[i].netid, "local") == 0)) { sip->si_af = na_cvt[i].af; sip->si_proto = na_cvt[i].protocol; sip->si_socktype = __rpc_seman2socktype((int)nconf->nc_semantics); if (sip->si_socktype == -1) return 0; sip->si_alen = __rpc_get_a_size(sip->si_af); return 1; } return 0; } struct socket * __rpc_nconf2socket(const struct netconfig *nconf) { struct __rpc_sockinfo si; struct socket *so; int error; if (!__rpc_nconf2sockinfo(nconf, &si)) return 0; so = NULL; error = socreate(si.si_af, &so, si.si_socktype, si.si_proto, curthread->td_ucred, curthread); if (error) return NULL; else return so; } char * taddr2uaddr(const struct netconfig *nconf, const struct netbuf *nbuf) { struct __rpc_sockinfo si; if (!__rpc_nconf2sockinfo(nconf, &si)) return NULL; return __rpc_taddr2uaddr_af(si.si_af, nbuf); } struct netbuf * uaddr2taddr(const struct netconfig *nconf, const char *uaddr) { struct __rpc_sockinfo si; if (!__rpc_nconf2sockinfo(nconf, &si)) return NULL; return __rpc_uaddr2taddr_af(si.si_af, uaddr); } char * __rpc_taddr2uaddr_af(int af, const struct netbuf *nbuf) { char *ret; struct sbuf sb; struct sockaddr_in *sin; struct sockaddr_un *sun; char namebuf[INET_ADDRSTRLEN]; #ifdef INET6 struct sockaddr_in6 *sin6; char namebuf6[INET6_ADDRSTRLEN]; #endif uint16_t port; sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND); switch (af) { case AF_INET: if (nbuf->len < sizeof(*sin)) return NULL; sin = nbuf->buf; if (inet_ntop(af, &sin->sin_addr, namebuf, sizeof namebuf) == NULL) return NULL; port = ntohs(sin->sin_port); if (sbuf_printf(&sb, "%s.%u.%u", namebuf, 
((uint32_t)port) >> 8, port & 0xff) < 0) return NULL; break; #ifdef INET6 case AF_INET6: if (nbuf->len < sizeof(*sin6)) return NULL; sin6 = nbuf->buf; if (inet_ntop(af, &sin6->sin6_addr, namebuf6, sizeof namebuf6) == NULL) return NULL; port = ntohs(sin6->sin6_port); if (sbuf_printf(&sb, "%s.%u.%u", namebuf6, ((uint32_t)port) >> 8, port & 0xff) < 0) return NULL; break; #endif case AF_LOCAL: sun = nbuf->buf; if (sbuf_printf(&sb, "%.*s", (int)(sun->sun_len - offsetof(struct sockaddr_un, sun_path)), sun->sun_path) < 0) return (NULL); break; default: return NULL; } sbuf_finish(&sb); ret = strdup(sbuf_data(&sb), M_RPC); sbuf_delete(&sb); return ret; } struct netbuf * __rpc_uaddr2taddr_af(int af, const char *uaddr) { struct netbuf *ret = NULL; char *addrstr, *p; unsigned port, portlo, porthi; struct sockaddr_in *sin; #ifdef INET6 struct sockaddr_in6 *sin6; #endif struct sockaddr_un *sun; port = 0; sin = NULL; if (uaddr == NULL) return NULL; addrstr = strdup(uaddr, M_RPC); if (addrstr == NULL) return NULL; /* * AF_LOCAL addresses are expected to be absolute * pathnames, anything else will be AF_INET or AF_INET6. */ if (*addrstr != '/') { p = strrchr(addrstr, '.'); if (p == NULL) goto out; portlo = (unsigned)strtol(p + 1, NULL, 10); *p = '\0'; p = strrchr(addrstr, '.'); if (p == NULL) goto out; porthi = (unsigned)strtol(p + 1, NULL, 10); *p = '\0'; port = (porthi << 8) | portlo; } ret = (struct netbuf *)malloc(sizeof *ret, M_RPC, M_WAITOK); switch (af) { case AF_INET: sin = (struct sockaddr_in *)malloc(sizeof *sin, M_RPC, M_WAITOK); memset(sin, 0, sizeof *sin); sin->sin_family = AF_INET; sin->sin_port = htons(port); if (inet_pton(AF_INET, addrstr, &sin->sin_addr) <= 0) { free(sin, M_RPC); free(ret, M_RPC); ret = NULL; goto out; } sin->sin_len = ret->maxlen = ret->len = sizeof *sin; ret->buf = sin; break; #ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)malloc(sizeof *sin6, M_RPC, M_WAITOK); memset(sin6, 0, sizeof *sin6); sin6->sin6_family = AF_INET6; sin6->sin6_port = htons(port); if (inet_pton(AF_INET6, addrstr, &sin6->sin6_addr) <= 0) { free(sin6, M_RPC); free(ret, M_RPC); ret = NULL; goto out; } sin6->sin6_len = ret->maxlen = ret->len = sizeof *sin6; ret->buf = sin6; break; #endif case AF_LOCAL: sun = (struct sockaddr_un *)malloc(sizeof *sun, M_RPC, M_WAITOK); memset(sun, 0, sizeof *sun); sun->sun_family = AF_LOCAL; strncpy(sun->sun_path, addrstr, sizeof(sun->sun_path) - 1); ret->len = ret->maxlen = sun->sun_len = SUN_LEN(sun); ret->buf = sun; break; default: break; } out: free(addrstr, M_RPC); return ret; } int __rpc_seman2socktype(int semantics) { switch (semantics) { case NC_TPI_CLTS: return SOCK_DGRAM; case NC_TPI_COTS_ORD: return SOCK_STREAM; case NC_TPI_RAW: return SOCK_RAW; default: break; } return -1; } int __rpc_socktype2seman(int socktype) { switch (socktype) { case SOCK_DGRAM: return NC_TPI_CLTS; case SOCK_STREAM: return NC_TPI_COTS_ORD; case SOCK_RAW: return NC_TPI_RAW; default: break; } return -1; } /* * Returns the type of the network as defined in * If nettype is NULL, it defaults to NETPATH. */ static int getnettype(const char *nettype) { int i; if ((nettype == NULL) || (nettype[0] == 0)) { return (_RPC_NETPATH); /* Default */ } #if 0 nettype = strlocase(nettype); #endif for (i = 0; _rpctypelist[i].name; i++) if (strcasecmp(nettype, _rpctypelist[i].name) == 0) { return (_rpctypelist[i].type); } return (_rpctypelist[i].type); } /* * For the given nettype (tcp or udp only), return the first structure found. 
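 * (For example, __rpc_getconfip("tcp") returns the entry whose nc_protofmly
 * is NC_INET and whose nc_proto is NC_TCP; "udp" selects NC_UDP instead.)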
* This should be freed by calling freenetconfigent() */ struct netconfig * __rpc_getconfip(const char *nettype) { char *netid; static char *netid_tcp = (char *) NULL; static char *netid_udp = (char *) NULL; struct netconfig *dummy; if (!netid_udp && !netid_tcp) { struct netconfig *nconf; void *confighandle; if (!(confighandle = setnetconfig())) { log(LOG_ERR, "rpc: failed to open " NETCONFIG); return (NULL); } while ((nconf = getnetconfig(confighandle)) != NULL) { if (strcmp(nconf->nc_protofmly, NC_INET) == 0) { if (strcmp(nconf->nc_proto, NC_TCP) == 0) { netid_tcp = strdup(nconf->nc_netid, M_RPC); } else if (strcmp(nconf->nc_proto, NC_UDP) == 0) { netid_udp = strdup(nconf->nc_netid, M_RPC); } } } endnetconfig(confighandle); } if (strcmp(nettype, "udp") == 0) netid = netid_udp; else if (strcmp(nettype, "tcp") == 0) netid = netid_tcp; else { return (NULL); } if ((netid == NULL) || (netid[0] == 0)) { return (NULL); } dummy = getnetconfigent(netid); return (dummy); } /* * Returns the type of the nettype, which should then be used with * __rpc_getconf(). * * For simplicity in the kernel, we don't support the NETPATH * environment variable. We behave as userland would then NETPATH is * unset, i.e. iterate over all visible entries in netconfig. */ void * __rpc_setconf(const char *nettype) { struct handle *handle; handle = (struct handle *) malloc(sizeof (struct handle), M_RPC, M_WAITOK); switch (handle->nettype = getnettype(nettype)) { case _RPC_NETPATH: case _RPC_CIRCUIT_N: case _RPC_DATAGRAM_N: if (!(handle->nhandle = setnetconfig())) goto failed; handle->nflag = TRUE; break; case _RPC_VISIBLE: case _RPC_CIRCUIT_V: case _RPC_DATAGRAM_V: case _RPC_TCP: case _RPC_UDP: if (!(handle->nhandle = setnetconfig())) { log(LOG_ERR, "rpc: failed to open " NETCONFIG); goto failed; } handle->nflag = FALSE; break; default: goto failed; } return (handle); failed: free(handle, M_RPC); return (NULL); } /* * Returns the next netconfig struct for the given "net" type. * __rpc_setconf() should have been called previously. 
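 *
 * The expected calling pattern is (sketch, error handling omitted):
 *
 *	void *handle;
 *	struct netconfig *nconf;
 *
 *	handle = __rpc_setconf("tcp");
 *	while (handle != NULL && (nconf = __rpc_getconf(handle)) != NULL) {
 *		... use the transport described by nconf ...
 *	}
 *	__rpc_endconf(handle);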
*/ struct netconfig * __rpc_getconf(void *vhandle) { struct handle *handle; struct netconfig *nconf; handle = (struct handle *)vhandle; if (handle == NULL) { return (NULL); } for (;;) { if (handle->nflag) { nconf = getnetconfig(handle->nhandle); if (nconf && !(nconf->nc_flag & NC_VISIBLE)) continue; } else { nconf = getnetconfig(handle->nhandle); } if (nconf == NULL) break; if ((nconf->nc_semantics != NC_TPI_CLTS) && (nconf->nc_semantics != NC_TPI_COTS) && (nconf->nc_semantics != NC_TPI_COTS_ORD)) continue; switch (handle->nettype) { case _RPC_VISIBLE: if (!(nconf->nc_flag & NC_VISIBLE)) continue; /* FALLTHROUGH */ case _RPC_NETPATH: /* Be happy */ break; case _RPC_CIRCUIT_V: if (!(nconf->nc_flag & NC_VISIBLE)) continue; /* FALLTHROUGH */ case _RPC_CIRCUIT_N: if ((nconf->nc_semantics != NC_TPI_COTS) && (nconf->nc_semantics != NC_TPI_COTS_ORD)) continue; break; case _RPC_DATAGRAM_V: if (!(nconf->nc_flag & NC_VISIBLE)) continue; /* FALLTHROUGH */ case _RPC_DATAGRAM_N: if (nconf->nc_semantics != NC_TPI_CLTS) continue; break; case _RPC_TCP: if (((nconf->nc_semantics != NC_TPI_COTS) && (nconf->nc_semantics != NC_TPI_COTS_ORD)) || (strcmp(nconf->nc_protofmly, NC_INET) #ifdef INET6 && strcmp(nconf->nc_protofmly, NC_INET6)) #else ) #endif || strcmp(nconf->nc_proto, NC_TCP)) continue; break; case _RPC_UDP: if ((nconf->nc_semantics != NC_TPI_CLTS) || (strcmp(nconf->nc_protofmly, NC_INET) #ifdef INET6 && strcmp(nconf->nc_protofmly, NC_INET6)) #else ) #endif || strcmp(nconf->nc_proto, NC_UDP)) continue; break; } break; } return (nconf); } void __rpc_endconf(void *vhandle) { struct handle *handle; handle = (struct handle *) vhandle; if (handle == NULL) { return; } endnetconfig(handle->nhandle); free(handle, M_RPC); } int __rpc_sockisbound(struct socket *so) { - struct sockaddr *sa; + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; int error, bound; - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_sockaddr(so, &sa); - CURVNET_RESTORE(); + error = sosockaddr(so, (struct sockaddr *)&ss); if (error) return (0); - switch (sa->sa_family) { + switch (ss.ss_family) { case AF_INET: - bound = (((struct sockaddr_in *) sa)->sin_port != 0); + bound = (((struct sockaddr_in *)&ss)->sin_port != 0); break; #ifdef INET6 case AF_INET6: - bound = (((struct sockaddr_in6 *) sa)->sin6_port != 0); + bound = (((struct sockaddr_in6 *)&ss)->sin6_port != 0); break; #endif case AF_LOCAL: /* XXX check this */ - bound = (((struct sockaddr_un *) sa)->sun_path[0] != '\0'); + bound = (((struct sockaddr_un *)&ss)->sun_path[0] != '\0'); break; default: bound = FALSE; break; } - free(sa, M_SONAME); - return bound; } /* * Implement XDR-style API for RPC call. 
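 *
 * Arguments and results travel as mbuf chains: xargs encodes argsp into an
 * mbuf, CLNT_CALL_MBUF() performs the call, and xresults decodes the reply
 * into resultsp.  A minimal sketch of a caller (a NULL rpc_callextra is
 * assumed to be acceptable here, as it is for the in-kernel client
 * transports):
 *
 *	struct timeval timo = { 25, 0 };
 *
 *	stat = clnt_call_private(cl, NULL, NULLPROC,
 *	    (xdrproc_t)xdr_void, NULL, (xdrproc_t)xdr_void, NULL, timo);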
*/ enum clnt_stat clnt_call_private( CLIENT *cl, /* client handle */ struct rpc_callextra *ext, /* call metadata */ rpcproc_t proc, /* procedure number */ xdrproc_t xargs, /* xdr routine for args */ void *argsp, /* pointer to args */ xdrproc_t xresults, /* xdr routine for results */ void *resultsp, /* pointer to results */ struct timeval utimeout) /* seconds to wait before giving up */ { XDR xdrs; struct mbuf *mreq; struct mbuf *mrep; enum clnt_stat stat; mreq = m_getcl(M_WAITOK, MT_DATA, 0); xdrmbuf_create(&xdrs, mreq, XDR_ENCODE); if (!xargs(&xdrs, argsp)) { m_freem(mreq); return (RPC_CANTENCODEARGS); } XDR_DESTROY(&xdrs); stat = CLNT_CALL_MBUF(cl, ext, proc, mreq, &mrep, utimeout); m_freem(mreq); if (stat == RPC_SUCCESS) { xdrmbuf_create(&xdrs, mrep, XDR_DECODE); if (!xresults(&xdrs, resultsp)) { XDR_DESTROY(&xdrs); return (RPC_CANTDECODERES); } XDR_DESTROY(&xdrs); } return (stat); } /* * Bind a socket to a privileged IP port */ int bindresvport(struct socket *so, struct sockaddr *sa) { + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; int old, error, af; - bool_t freesa = FALSE; struct sockaddr_in *sin; #ifdef INET6 struct sockaddr_in6 *sin6; #endif struct sockopt opt; int proto, portrange, portlow; uint16_t *portp; socklen_t salen; if (sa == NULL) { - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_sockaddr(so, &sa); - CURVNET_RESTORE(); + sa = (struct sockaddr *)&ss; + error = sosockaddr(so, sa); if (error) return (error); - freesa = TRUE; af = sa->sa_family; salen = sa->sa_len; memset(sa, 0, sa->sa_len); } else { af = sa->sa_family; salen = sa->sa_len; } switch (af) { case AF_INET: proto = IPPROTO_IP; portrange = IP_PORTRANGE; portlow = IP_PORTRANGE_LOW; sin = (struct sockaddr_in *)sa; portp = &sin->sin_port; break; #ifdef INET6 case AF_INET6: proto = IPPROTO_IPV6; portrange = IPV6_PORTRANGE; portlow = IPV6_PORTRANGE_LOW; sin6 = (struct sockaddr_in6 *)sa; portp = &sin6->sin6_port; break; #endif default: return (EPFNOSUPPORT); } sa->sa_family = af; sa->sa_len = salen; if (*portp == 0) { bzero(&opt, sizeof(opt)); opt.sopt_dir = SOPT_GET; opt.sopt_level = proto; opt.sopt_name = portrange; opt.sopt_val = &old; opt.sopt_valsize = sizeof(old); error = sogetopt(so, &opt); - if (error) { - goto out; - } + if (error) + return (error); opt.sopt_dir = SOPT_SET; opt.sopt_val = &portlow; error = sosetopt(so, &opt); if (error) - goto out; + return (error); } error = sobind(so, sa, curthread); if (*portp == 0) { if (error) { opt.sopt_dir = SOPT_SET; opt.sopt_val = &old; sosetopt(so, &opt); } } -out: - if (freesa) - free(sa, M_SONAME); return (error); } /* * Make sure an mbuf list is made up entirely of ext_pgs mbufs. * This is needed for sosend() when KERN_TLS is being used. * (There might also be a performance improvement for certain * network interfaces that handle ext_pgs mbufs efficiently.) * It expects at least one non-ext_pgs mbuf followed by zero * or more ext_pgs mbufs. It does not handle the case where * non-ext_pgs mbuf(s) follow ext_pgs ones. * It also performs sanity checks on the resultant list. * The "mp" argument list is consumed. * The "maxextsiz" argument is the upper bound on the data * size for each mbuf (usually 16K for KERN_TLS). */ struct mbuf * _rpc_copym_into_ext_pgs(struct mbuf *mp, int maxextsiz) { struct mbuf *m, *m2, *m3, *mhead; int tlen; KASSERT((mp->m_flags & (M_EXT | M_EXTPG)) != (M_EXT | M_EXTPG), ("_rpc_copym_into_ext_pgs:" " first mbuf is an ext_pgs")); /* * Find the last non-ext_pgs mbuf and the total * length of the non-ext_pgs mbuf(s). 
* The first mbuf must always be a non-ext_pgs * mbuf. */ tlen = mp->m_len; m2 = mp; for (m = mp->m_next; m != NULL; m = m->m_next) { if ((m->m_flags & M_EXTPG) != 0) break; tlen += m->m_len; m2 = m; } /* * Copy the non-ext_pgs mbuf(s) into an ext_pgs * mbuf list. */ m2->m_next = NULL; mhead = mb_mapped_to_unmapped(mp, tlen, maxextsiz, M_WAITOK, &m2); /* * Link the ext_pgs list onto the newly copied * list and free up the non-ext_pgs mbuf(s). */ m2->m_next = m; m_freem(mp); /* * Sanity check the resultant mbuf list. Check for and * remove any 0 length mbufs in the list, since the * KERN_TLS code does not expect any 0 length mbuf(s) * in the list. */ m3 = NULL; m2 = mhead; tlen = 0; while (m2 != NULL) { KASSERT(m2->m_len >= 0, ("_rpc_copym_into_ext_pgs:" " negative m_len")); KASSERT((m2->m_flags & (M_EXT | M_EXTPG)) == (M_EXT | M_EXTPG), ("_rpc_copym_into_ext_pgs:" " non-nomap mbuf in list")); if (m2->m_len == 0) { if (m3 != NULL) m3->m_next = m2->m_next; else m = m2->m_next; m2->m_next = NULL; m_free(m2); if (m3 != NULL) m2 = m3->m_next; else m2 = m; } else { MBUF_EXT_PGS_ASSERT_SANITY(m2); m3 = m2; tlen += m2->m_len; m2 = m2->m_next; } } return (mhead); } /* * Kernel module glue */ static int krpc_modevent(module_t mod, int type, void *data) { int error = 0; switch (type) { case MOD_LOAD: error = rpctls_init(); break; case MOD_UNLOAD: /* * Cannot be unloaded, since the rpctlssd or rpctlscd daemons * might be performing a rpctls syscall. */ /* FALLTHROUGH */ default: error = EOPNOTSUPP; } return (error); } static moduledata_t krpc_mod = { "krpc", krpc_modevent, NULL, }; DECLARE_MODULE(krpc, krpc_mod, SI_SUB_VFS, SI_ORDER_ANY); /* So that loader and kldload(2) can find us, wherever we are.. */ MODULE_VERSION(krpc, 1); MODULE_DEPEND(krpc, xdr, 1, 1, 1); diff --git a/sys/rpc/svc_dg.c b/sys/rpc/svc_dg.c index 8e5634b92d8d..d77a727e820d 100644 --- a/sys/rpc/svc_dg.c +++ b/sys/rpc/svc_dg.c @@ -1,302 +1,297 @@ /* $NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1986-1991 by Sun Microsystems Inc. */ /* * svc_dg.c, Server side for connectionless RPC. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static enum xprt_stat svc_dg_stat(SVCXPRT *); static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *); static void svc_dg_destroy(SVCXPRT *); static bool_t svc_dg_control(SVCXPRT *, const u_int, void *); static int svc_dg_soupcall(struct socket *so, void *arg, int waitflag); static const struct xp_ops svc_dg_ops = { .xp_recv = svc_dg_recv, .xp_stat = svc_dg_stat, .xp_reply = svc_dg_reply, .xp_destroy = svc_dg_destroy, .xp_control = svc_dg_control, }; /* * Usage: * xprt = svc_dg_create(sock, sendsize, recvsize); * Does other connectionless specific initializations. * Once *xprt is initialized, it is registered. * see (svc.h, xprt_register). If recvsize or sendsize are 0 suitable * system defaults are chosen. * The routines returns NULL if a problem occurred. */ static const char svc_dg_str[] = "svc_dg_create: %s"; static const char svc_dg_err1[] = "could not get transport information"; static const char svc_dg_err2[] = "transport does not support data transfer"; static const char __no_mem_str[] = "out of memory"; SVCXPRT * svc_dg_create(SVCPOOL *pool, struct socket *so, size_t sendsize, size_t recvsize) { SVCXPRT *xprt; struct __rpc_sockinfo si; - struct sockaddr* sa; int error; if (jailed(curthread->td_ucred)) return (NULL); if (!__rpc_socket2sockinfo(so, &si)) { printf(svc_dg_str, svc_dg_err1); return (NULL); } /* * Find the receive and the send size */ sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize); recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize); if ((sendsize == 0) || (recvsize == 0)) { printf(svc_dg_str, svc_dg_err2); return (NULL); } xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = NULL; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_dg_ops; - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_sockaddr(so, &sa); - CURVNET_RESTORE(); + xprt->xp_ltaddr.ss_len = sizeof(xprt->xp_ltaddr); + error = sosockaddr(so, (struct sockaddr *)&xprt->xp_ltaddr); if (error) goto freedata; - memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); - free(sa, M_SONAME); - xprt_register(xprt); SOCKBUF_LOCK(&so->so_rcv); soupcall_set(so, SO_RCV, svc_dg_soupcall, xprt); SOCKBUF_UNLOCK(&so->so_rcv); return (xprt); freedata: (void) printf(svc_dg_str, __no_mem_str); svc_xprt_free(xprt); return (NULL); } /*ARGSUSED*/ static enum xprt_stat svc_dg_stat(SVCXPRT *xprt) { if (soreadable(xprt->xp_socket)) return (XPRT_MOREREQS); return (XPRT_IDLE); } static bool_t svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct uio uio; struct sockaddr *raddr; 
struct mbuf *mreq; XDR xdrs; int error, rcvflag; /* * Serialise access to the socket. */ sx_xlock(&xprt->xp_lock); /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to read a * packet from the socket and process it. If the read fails, * we have drained all pending requests so we call * xprt_inactive(). */ uio.uio_resid = 1000000000; uio.uio_td = curthread; mreq = NULL; rcvflag = MSG_DONTWAIT; error = soreceive(xprt->xp_socket, &raddr, &uio, &mreq, NULL, &rcvflag); if (error == EWOULDBLOCK) { /* * We must re-test for readability after taking the * lock to protect us in the case where a new packet * arrives on the socket after our call to soreceive * fails with EWOULDBLOCK. The pool lock protects us * from racing the upcall after our soreadable() call * returns false. */ SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); if (!soreadable(xprt->xp_socket)) xprt_inactive_self(xprt); SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); sx_xunlock(&xprt->xp_lock); return (FALSE); } if (error) { SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); soupcall_clear(xprt->xp_socket, SO_RCV); SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); xprt_inactive_self(xprt); sx_xunlock(&xprt->xp_lock); return (FALSE); } sx_xunlock(&xprt->xp_lock); xdrmbuf_create(&xdrs, mreq, XDR_DECODE); if (! xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = raddr; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } static bool_t svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr *addr, struct mbuf *m, uint32_t *seq) { XDR xdrs; struct mbuf *mrep; bool_t stat = TRUE; int error; mrep = m_gethdr(M_WAITOK, MT_DATA); xdrmbuf_create(&xdrs, mrep, XDR_ENCODE); if (msg->rm_reply.rp_stat == MSG_ACCEPTED && msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { if (!xdr_replymsg(&xdrs, msg)) stat = FALSE; else xdrmbuf_append(&xdrs, m); } else { stat = xdr_replymsg(&xdrs, msg); } if (stat) { m_fixhdr(mrep); error = sosend(xprt->xp_socket, addr, NULL, mrep, NULL, 0, curthread); if (!error) { stat = TRUE; } } else { m_freem(mrep); } XDR_DESTROY(&xdrs); xprt->xp_p2 = NULL; return (stat); } static void svc_dg_destroy(SVCXPRT *xprt) { SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); soupcall_clear(xprt->xp_socket, SO_RCV); SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); sx_destroy(&xprt->xp_lock); if (xprt->xp_socket) (void)soclose(xprt->xp_socket); if (xprt->xp_netid) (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1); svc_xprt_free(xprt); } static bool_t /*ARGSUSED*/ svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static int svc_dg_soupcall(struct socket *so, void *arg, int waitflag) { SVCXPRT *xprt = (SVCXPRT *) arg; xprt_active(xprt); return (SU_OK); } diff --git a/sys/rpc/svc_vc.c b/sys/rpc/svc_vc.c index a19d73a850b9..531bedb6b65a 100644 --- a/sys/rpc/svc_vc.c +++ b/sys/rpc/svc_vc.c @@ -1,1197 +1,1186 @@ /* $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 
* - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include /* * svc_vc.c, Server side for Connection Oriented based RPC. * * Actually implements two flavors of transporter - * a tcp rendezvouser (a listner and connection establisher) * and a record/tcp stream. */ #include "opt_kern_tls.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "RPC"); SYSCTL_NODE(_kern_rpc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "TLS"); SYSCTL_NODE(_kern_rpc, OID_AUTO, unenc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "unencrypted"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_rx_msgbytes) = 0; SYSCTL_U64(_kern_rpc_unenc, OID_AUTO, rx_msgbytes, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_rx_msgbytes), 0, "Count of non-TLS rx bytes"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_rx_msgcnt) = 0; SYSCTL_U64(_kern_rpc_unenc, OID_AUTO, rx_msgcnt, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_rx_msgcnt), 0, "Count of non-TLS rx messages"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tx_msgbytes) = 0; SYSCTL_U64(_kern_rpc_unenc, OID_AUTO, tx_msgbytes, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tx_msgbytes), 0, "Count of non-TLS tx bytes"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tx_msgcnt) = 0; SYSCTL_U64(_kern_rpc_unenc, OID_AUTO, tx_msgcnt, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tx_msgcnt), 0, "Count of non-TLS tx messages"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_alerts) = 0; SYSCTL_U64(_kern_rpc_tls, OID_AUTO, alerts, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_alerts), 0, "Count of TLS alert messages"); KRPC_VNET_DEFINE(uint64_t, svc_vc_tls_handshake_failed) = 0; SYSCTL_U64(_kern_rpc_tls, OID_AUTO, handshake_failed, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_handshake_failed), 0, "Count of TLS failed handshakes"); KRPC_VNET_DEFINE(uint64_t, svc_vc_tls_handshake_success) = 0; SYSCTL_U64(_kern_rpc_tls, OID_AUTO, handshake_success, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_handshake_success), 0, "Count of TLS successful handshakes"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_rx_msgbytes) = 0; SYSCTL_U64(_kern_rpc_tls, OID_AUTO, rx_msgbytes, CTLFLAG_KRPC_VNET | CTLFLAG_RW, 
&KRPC_VNET_NAME(svc_vc_tls_rx_msgbytes), 0, "Count of TLS rx bytes"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_rx_msgcnt) = 0; SYSCTL_U64(_kern_rpc_tls, OID_AUTO, rx_msgcnt, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_rx_msgcnt), 0, "Count of TLS rx messages"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_tx_msgbytes) = 0; SYSCTL_U64(_kern_rpc_tls, OID_AUTO, tx_msgbytes, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_tx_msgbytes), 0, "Count of TLS tx bytes"); KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_tx_msgcnt) = 0; SYSCTL_U64(_kern_rpc_tls, OID_AUTO, tx_msgcnt, CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_tx_msgcnt), 0, "Count of TLS tx messages"); static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *); static void svc_vc_rendezvous_destroy(SVCXPRT *); static bool_t svc_vc_null(void); static void svc_vc_destroy(SVCXPRT *); static enum xprt_stat svc_vc_stat(SVCXPRT *); static bool_t svc_vc_ack(SVCXPRT *, uint32_t *); static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *seq); static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in); static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq, void *in); static void svc_vc_backchannel_destroy(SVCXPRT *); static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *); static bool_t svc_vc_backchannel_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_vc_backchannel_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *); static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in); static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr); static int svc_vc_accept(struct socket *head, struct socket **sop); static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag); static int svc_vc_rendezvous_soupcall(struct socket *, void *, int); static const struct xp_ops svc_vc_rendezvous_ops = { .xp_recv = svc_vc_rendezvous_recv, .xp_stat = svc_vc_rendezvous_stat, .xp_reply = (bool_t (*)(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *))svc_vc_null, .xp_destroy = svc_vc_rendezvous_destroy, .xp_control = svc_vc_rendezvous_control }; static const struct xp_ops svc_vc_ops = { .xp_recv = svc_vc_recv, .xp_stat = svc_vc_stat, .xp_ack = svc_vc_ack, .xp_reply = svc_vc_reply, .xp_destroy = svc_vc_destroy, .xp_control = svc_vc_control }; static const struct xp_ops svc_vc_backchannel_ops = { .xp_recv = svc_vc_backchannel_recv, .xp_stat = svc_vc_backchannel_stat, .xp_reply = svc_vc_backchannel_reply, .xp_destroy = svc_vc_backchannel_destroy, .xp_control = svc_vc_backchannel_control }; /* * Usage: * xprt = svc_vc_create(sock, send_buf_size, recv_buf_size); * * Creates, registers, and returns a (rpc) tcp based transporter. * Once *xprt is initialized, it is registered as a transporter * see (svc.h, xprt_register). This routine returns * a NULL if a problem occurred. * * The filedescriptor passed in is expected to refer to a bound, but * not yet connected socket. * * Since streams do buffered io similar to stdio, the caller can specify * how big the send and receive buffers are via the second and third parms; * 0 => use the system default. 
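 *
 * Note that the in-kernel variant below also takes the owning SVCPOOL as its
 * first argument, so a typical call is (sketch):
 *
 *	xprt = svc_vc_create(pool, so, 0, 0);
 *
 * with 0 for both sizes selecting the defaults described above.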
*/ SVCXPRT * svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize, size_t recvsize) { SVCXPRT *xprt; - struct sockaddr* sa; int error; SOCK_LOCK(so); if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) { + struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; + SOCK_UNLOCK(so); - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_peeraddr(so, &sa); - CURVNET_RESTORE(); + error = sopeeraddr(so, (struct sockaddr *)&ss); if (error) return (NULL); - xprt = svc_vc_create_conn(pool, so, sa); - free(sa, M_SONAME); + xprt = svc_vc_create_conn(pool, so, (struct sockaddr *)&ss); return (xprt); } SOCK_UNLOCK(so); xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = NULL; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_rendezvous_ops; - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_sockaddr(so, &sa); - CURVNET_RESTORE(); + xprt->xp_ltaddr.ss_len = sizeof(xprt->xp_ltaddr); + error = sosockaddr(so, (struct sockaddr *)&xprt->xp_ltaddr); if (error) { goto cleanup_svc_vc_create; } - memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); - free(sa, M_SONAME); - xprt_register(xprt); solisten(so, -1, curthread); SOLISTEN_LOCK(so); xprt->xp_upcallset = 1; solisten_upcall_set(so, svc_vc_rendezvous_soupcall, xprt); SOLISTEN_UNLOCK(so); return (xprt); cleanup_svc_vc_create: sx_destroy(&xprt->xp_lock); svc_xprt_free(xprt); return (NULL); } /* * Create a new transport for a socket optained via soaccept(). */ SVCXPRT * svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr) { SVCXPRT *xprt; struct cf_conn *cd; - struct sockaddr* sa = NULL; struct sockopt opt; int one = 1; int error; bzero(&opt, sizeof(struct sockopt)); opt.sopt_dir = SOPT_SET; opt.sopt_level = SOL_SOCKET; opt.sopt_name = SO_KEEPALIVE; opt.sopt_val = &one; opt.sopt_valsize = sizeof(one); error = sosetopt(so, &opt); if (error) { return (NULL); } if (so->so_proto->pr_protocol == IPPROTO_TCP) { bzero(&opt, sizeof(struct sockopt)); opt.sopt_dir = SOPT_SET; opt.sopt_level = IPPROTO_TCP; opt.sopt_name = TCP_NODELAY; opt.sopt_val = &one; opt.sopt_valsize = sizeof(one); error = sosetopt(so, &opt); if (error) { return (NULL); } } cd = mem_alloc(sizeof(*cd)); cd->strm_stat = XPRT_IDLE; xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = cd; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_ops; /* * See http://www.connectathon.org/talks96/nfstcp.pdf - client * has a 5 minute timer, server has a 6 minute timer. */ xprt->xp_idletimeout = 6 * 60; memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len); - CURVNET_SET(so->so_vnet); - error = so->so_proto->pr_sockaddr(so, &sa); - CURVNET_RESTORE(); + xprt->xp_ltaddr.ss_len = sizeof(xprt->xp_ltaddr); + error = sosockaddr(so, (struct sockaddr *)&xprt->xp_ltaddr); if (error) goto cleanup_svc_vc_create; - memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); - free(sa, M_SONAME); - xprt_register(xprt); SOCKBUF_LOCK(&so->so_rcv); xprt->xp_upcallset = 1; soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt); SOCKBUF_UNLOCK(&so->so_rcv); /* * Throw the transport into the active list in case it already * has some data buffered. */ sx_xlock(&xprt->xp_lock); xprt_active(xprt); sx_xunlock(&xprt->xp_lock); return (xprt); cleanup_svc_vc_create: sx_destroy(&xprt->xp_lock); svc_xprt_free(xprt); mem_free(cd, sizeof(*cd)); return (NULL); } /* * Create a new transport for a backchannel on a clnt_vc socket. 
*/ SVCXPRT * svc_vc_create_backchannel(SVCPOOL *pool) { SVCXPRT *xprt = NULL; struct cf_conn *cd = NULL; cd = mem_alloc(sizeof(*cd)); cd->strm_stat = XPRT_IDLE; xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = NULL; xprt->xp_p1 = cd; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_backchannel_ops; return (xprt); } /* * This does all of the accept except the final call to soaccept. The * caller will call soaccept after dropping its locks (soaccept may * call malloc). */ int svc_vc_accept(struct socket *head, struct socket **sop) { struct socket *so; int error = 0; short nbio; KASSERT(SOLISTENING(head), ("%s: socket %p is not listening", __func__, head)); #ifdef MAC error = mac_socket_check_accept(curthread->td_ucred, head); if (error != 0) goto done; #endif /* * XXXGL: we want non-blocking semantics. The socket could be a * socket created by kernel as well as socket shared with userland, * so we can't be sure about presense of SS_NBIO. We also shall not * toggle it on the socket, since that may surprise userland. So we * set SS_NBIO only temporarily. */ SOLISTEN_LOCK(head); nbio = head->so_state & SS_NBIO; head->so_state |= SS_NBIO; error = solisten_dequeue(head, &so, 0); head->so_state &= (nbio & ~SS_NBIO); if (error) goto done; so->so_state |= nbio; *sop = so; /* connection has been removed from the listen queue */ KNOTE_UNLOCKED(&head->so_rdsel.si_note, 0); done: return (error); } /*ARGSUSED*/ static bool_t svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct socket *so = NULL; struct sockaddr_storage ss = { .ss_len = sizeof(ss) }; int error; SVCXPRT *new_xprt; /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to accept a * connection from the socket and turn it into a new * transport. If the accept fails, we have drained all pending * connections so we call xprt_inactive(). */ sx_xlock(&xprt->xp_lock); error = svc_vc_accept(xprt->xp_socket, &so); if (error == EWOULDBLOCK) { /* * We must re-test for new connections after taking * the lock to protect us in the case where a new * connection arrives after our call to accept fails * with EWOULDBLOCK. */ SOLISTEN_LOCK(xprt->xp_socket); if (TAILQ_EMPTY(&xprt->xp_socket->sol_comp)) xprt_inactive_self(xprt); SOLISTEN_UNLOCK(xprt->xp_socket); sx_xunlock(&xprt->xp_lock); return (FALSE); } if (error) { SOLISTEN_LOCK(xprt->xp_socket); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(xprt->xp_socket, SO_RCV); } SOLISTEN_UNLOCK(xprt->xp_socket); xprt_inactive_self(xprt); sx_xunlock(&xprt->xp_lock); return (FALSE); } sx_xunlock(&xprt->xp_lock); error = soaccept(so, (struct sockaddr *)&ss); if (error) { /* * XXX not sure if I need to call sofree or soclose here. */ return (FALSE); } /* * svc_vc_create_conn will call xprt_register - we don't need * to do anything with the new connection except derefence it. 
*/ new_xprt = svc_vc_create_conn(xprt->xp_pool, so, (struct sockaddr *)&ss); if (!new_xprt) { soclose(so); } else { SVC_RELEASE(new_xprt); } return (FALSE); /* there is never an rpc msg to be processed */ } /*ARGSUSED*/ static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *xprt) { return (XPRT_IDLE); } static void svc_vc_destroy_common(SVCXPRT *xprt) { uint32_t reterr; if (xprt->xp_socket) { if ((xprt->xp_tls & (RPCTLS_FLAGS_HANDSHAKE | RPCTLS_FLAGS_HANDSHFAIL)) != 0) { if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) { /* * If the upcall fails, the socket has * probably been closed via the rpctlssd * daemon having crashed or been * restarted, so just ignore returned stat. */ rpctls_srv_disconnect(xprt->xp_sslsec, xprt->xp_sslusec, xprt->xp_sslrefno, xprt->xp_sslproc, &reterr); } /* Must sorele() to get rid of reference. */ CURVNET_SET(xprt->xp_socket->so_vnet); sorele(xprt->xp_socket); CURVNET_RESTORE(); } else (void)soclose(xprt->xp_socket); } if (xprt->xp_netid) (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1); svc_xprt_free(xprt); } static void svc_vc_rendezvous_destroy(SVCXPRT *xprt) { SOLISTEN_LOCK(xprt->xp_socket); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; solisten_upcall_set(xprt->xp_socket, NULL, NULL); } SOLISTEN_UNLOCK(xprt->xp_socket); svc_vc_destroy_common(xprt); } static void svc_vc_destroy(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1; CLIENT *cl = (CLIENT *)xprt->xp_p2; SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; if (xprt->xp_socket->so_rcv.sb_upcall != NULL) soupcall_clear(xprt->xp_socket, SO_RCV); } SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); if (cl != NULL) CLNT_RELEASE(cl); svc_vc_destroy_common(xprt); if (cd->mreq) m_freem(cd->mreq); if (cd->mpending) m_freem(cd->mpending); mem_free(cd, sizeof(*cd)); } static void svc_vc_backchannel_destroy(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1; struct mbuf *m, *m2; svc_xprt_free(xprt); m = cd->mreq; while (m != NULL) { m2 = m; m = m->m_nextpkt; m_freem(m2); } mem_free(cd, sizeof(*cd)); } /*ARGSUSED*/ static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static bool_t svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static enum xprt_stat svc_vc_stat(SVCXPRT *xprt) { struct cf_conn *cd; cd = (struct cf_conn *)(xprt->xp_p1); if (cd->strm_stat == XPRT_DIED) return (XPRT_DIED); if (cd->mreq != NULL && cd->resid == 0 && cd->eor) return (XPRT_MOREREQS); if (soreadable(xprt->xp_socket)) return (XPRT_MOREREQS); return (XPRT_IDLE); } static bool_t svc_vc_ack(SVCXPRT *xprt, uint32_t *ack) { *ack = atomic_load_acq_32(&xprt->xp_snt_cnt); *ack -= sbused(&xprt->xp_socket->so_snd); return (TRUE); } static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *xprt) { struct cf_conn *cd; cd = (struct cf_conn *)(xprt->xp_p1); if (cd->mreq != NULL) return (XPRT_MOREREQS); return (XPRT_IDLE); } /* * If we have an mbuf chain in cd->mpending, try to parse a record from it, * leaving the result in cd->mreq. If we don't have a complete record, leave * the partial result in cd->mreq and try to read more from the socket. 
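 *
 * Records are delimited by the standard RPC record mark: a 4-byte big-endian
 * header whose high bit marks the last fragment of a record and whose low
 * 31 bits give the fragment length.  For example, a final fragment of
 * 0x1c (28) bytes is preceded by the marker 0x8000001c; the code below
 * stores the last-fragment flag in cd->eor and the remaining length in
 * cd->resid.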
*/ static int svc_vc_process_pending(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct socket *so = xprt->xp_socket; struct mbuf *m; /* * If cd->resid is non-zero, we have part of the * record already, otherwise we are expecting a record * marker. */ if (!cd->resid && cd->mpending) { /* * See if there is enough data buffered to * make up a record marker. Make sure we can * handle the case where the record marker is * split across more than one mbuf. */ size_t n = 0; uint32_t header; m = cd->mpending; while (n < sizeof(uint32_t) && m) { n += m->m_len; m = m->m_next; } if (n < sizeof(uint32_t)) { so->so_rcv.sb_lowat = sizeof(uint32_t) - n; return (FALSE); } m_copydata(cd->mpending, 0, sizeof(header), (char *)&header); header = ntohl(header); cd->eor = (header & 0x80000000) != 0; cd->resid = header & 0x7fffffff; m_adj(cd->mpending, sizeof(uint32_t)); } /* * Start pulling off mbufs from cd->mpending * until we either have a complete record or * we run out of data. We use m_split to pull * data - it will pull as much as possible and * split the last mbuf if necessary. */ while (cd->mpending && cd->resid) { m = cd->mpending; if (cd->mpending->m_next || cd->mpending->m_len > cd->resid) cd->mpending = m_split(cd->mpending, cd->resid, M_WAITOK); else cd->mpending = NULL; if (cd->mreq) m_last(cd->mreq)->m_next = m; else cd->mreq = m; while (m) { cd->resid -= m->m_len; m = m->m_next; } } /* * Block receive upcalls if we have more data pending, * otherwise report our need. */ if (cd->mpending) so->so_rcv.sb_lowat = INT_MAX; else so->so_rcv.sb_lowat = imax(1, imin(cd->resid, so->so_rcv.sb_hiwat / 2)); return (TRUE); } static bool_t svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct uio uio; struct mbuf *m, *ctrl; struct socket* so = xprt->xp_socket; XDR xdrs; int error, rcvflag; uint32_t reterr, xid_plus_direction[2]; struct cmsghdr *cmsg; struct tls_get_record tgr; enum clnt_stat ret; /* * Serialise access to the socket and our own record parsing * state. */ sx_xlock(&xprt->xp_lock); for (;;) { /* If we have no request ready, check pending queue. */ while (cd->mpending && (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) { if (!svc_vc_process_pending(xprt)) break; } /* Process and return complete request in cd->mreq. */ if (cd->mreq != NULL && cd->resid == 0 && cd->eor) { /* * Now, check for a backchannel reply. * The XID is in the first uint32_t of the reply * and the message direction is the second one. */ if ((cd->mreq->m_len >= sizeof(xid_plus_direction) || m_length(cd->mreq, NULL) >= sizeof(xid_plus_direction)) && xprt->xp_p2 != NULL) { m_copydata(cd->mreq, 0, sizeof(xid_plus_direction), (char *)xid_plus_direction); xid_plus_direction[0] = ntohl(xid_plus_direction[0]); xid_plus_direction[1] = ntohl(xid_plus_direction[1]); /* Check message direction. */ if (xid_plus_direction[1] == REPLY) { clnt_bck_svccall(xprt->xp_p2, cd->mreq, xid_plus_direction[0]); cd->mreq = NULL; continue; } } xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE); cd->mreq = NULL; /* Check for next request in a pending queue. */ svc_vc_process_pending(xprt); if (cd->mreq == NULL || cd->resid != 0) { SOCKBUF_LOCK(&so->so_rcv); if (!soreadable(so)) xprt_inactive_self(xprt); SOCKBUF_UNLOCK(&so->so_rcv); } sx_xunlock(&xprt->xp_lock); if (! 
xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = NULL; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } /* * If receiving is disabled so that a TLS handshake can be * done by the rpctlssd daemon, return FALSE here. */ rcvflag = MSG_DONTWAIT; if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) rcvflag |= MSG_TLSAPPDATA; tryagain: if (xprt->xp_dontrcv) { sx_xunlock(&xprt->xp_lock); return (FALSE); } /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to * read as much as possible from the socket and put * the result in cd->mpending. If the read fails, * we have drained both cd->mpending and the socket so * we can call xprt_inactive(). */ uio.uio_resid = 1000000000; uio.uio_td = curthread; ctrl = m = NULL; error = soreceive(so, NULL, &uio, &m, &ctrl, &rcvflag); if (error == EWOULDBLOCK) { /* * We must re-test for readability after * taking the lock to protect us in the case * where a new packet arrives on the socket * after our call to soreceive fails with * EWOULDBLOCK. */ SOCKBUF_LOCK(&so->so_rcv); if (!soreadable(so)) xprt_inactive_self(xprt); SOCKBUF_UNLOCK(&so->so_rcv); sx_xunlock(&xprt->xp_lock); return (FALSE); } /* * A return of ENXIO indicates that there is an * alert record at the head of the * socket's receive queue, for TLS connections. * This record needs to be handled in userland * via an SSL_read() call, so do an upcall to the daemon. */ KRPC_CURVNET_SET(so->so_vnet); if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0 && error == ENXIO) { KRPC_VNET(svc_vc_tls_alerts)++; KRPC_CURVNET_RESTORE(); /* Disable reception. */ xprt->xp_dontrcv = TRUE; sx_xunlock(&xprt->xp_lock); ret = rpctls_srv_handlerecord(xprt->xp_sslsec, xprt->xp_sslusec, xprt->xp_sslrefno, xprt->xp_sslproc, &reterr); sx_xlock(&xprt->xp_lock); xprt->xp_dontrcv = FALSE; if (ret != RPC_SUCCESS || reterr != RPCTLSERR_OK) { /* * All we can do is soreceive() it and * then toss it. */ rcvflag = MSG_DONTWAIT; goto tryagain; } sx_xunlock(&xprt->xp_lock); xprt_active(xprt); /* Harmless if already active. */ return (FALSE); } if (error) { KRPC_CURVNET_RESTORE(); SOCKBUF_LOCK(&so->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(so, SO_RCV); } SOCKBUF_UNLOCK(&so->so_rcv); xprt_inactive_self(xprt); cd->strm_stat = XPRT_DIED; sx_xunlock(&xprt->xp_lock); return (FALSE); } if (!m) { KRPC_CURVNET_RESTORE(); /* * EOF - the other end has closed the socket. */ xprt_inactive_self(xprt); cd->strm_stat = XPRT_DIED; sx_xunlock(&xprt->xp_lock); return (FALSE); } /* Process any record header(s). */ if (ctrl != NULL) { cmsg = mtod(ctrl, struct cmsghdr *); if (cmsg->cmsg_type == TLS_GET_RECORD && cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) { memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr)); /* * TLS_RLTYPE_ALERT records should be handled * since soreceive() would have returned * ENXIO. Just throw any other * non-TLS_RLTYPE_APP records away. 
*/ if (tgr.tls_type != TLS_RLTYPE_APP) { m_freem(m); m_free(ctrl); rcvflag = MSG_DONTWAIT | MSG_TLSAPPDATA; KRPC_CURVNET_RESTORE(); goto tryagain; } KRPC_VNET(svc_vc_tls_rx_msgcnt)++; KRPC_VNET(svc_vc_tls_rx_msgbytes) += 1000000000 - uio.uio_resid; } m_free(ctrl); } else { KRPC_VNET(svc_vc_rx_msgcnt)++; KRPC_VNET(svc_vc_rx_msgbytes) += 1000000000 - uio.uio_resid; } KRPC_CURVNET_RESTORE(); if (cd->mpending) m_last(cd->mpending)->m_next = m; else cd->mpending = m; } } static bool_t svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct ct_data *ct; struct mbuf *m; XDR xdrs; sx_xlock(&xprt->xp_lock); ct = (struct ct_data *)xprt->xp_p2; if (ct == NULL) { sx_xunlock(&xprt->xp_lock); return (FALSE); } mtx_lock(&ct->ct_lock); m = cd->mreq; if (m == NULL) { xprt_inactive_self(xprt); mtx_unlock(&ct->ct_lock); sx_xunlock(&xprt->xp_lock); return (FALSE); } cd->mreq = m->m_nextpkt; mtx_unlock(&ct->ct_lock); sx_xunlock(&xprt->xp_lock); xdrmbuf_create(&xdrs, m, XDR_DECODE); if (! xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = NULL; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } static bool_t svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr *addr, struct mbuf *m, uint32_t *seq) { XDR xdrs; struct mbuf *mrep; bool_t stat = TRUE; int error, len, maxextsiz; #ifdef KERN_TLS u_int maxlen; #endif /* * Leave space for record mark. */ mrep = m_gethdr(M_WAITOK, MT_DATA); mrep->m_data += sizeof(uint32_t); xdrmbuf_create(&xdrs, mrep, XDR_ENCODE); if (msg->rm_reply.rp_stat == MSG_ACCEPTED && msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { if (!xdr_replymsg(&xdrs, msg)) stat = FALSE; else xdrmbuf_append(&xdrs, m); } else { stat = xdr_replymsg(&xdrs, msg); } if (stat) { m_fixhdr(mrep); /* * Prepend a record marker containing the reply length. */ M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK); len = mrep->m_pkthdr.len; *mtod(mrep, uint32_t *) = htonl(0x80000000 | (len - sizeof(uint32_t))); /* For RPC-over-TLS, copy mrep to a chain of ext_pgs. */ KRPC_CURVNET_SET(xprt->xp_socket->so_vnet); if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) { /* * Copy the mbuf chain to a chain of * ext_pgs mbuf(s) as required by KERN_TLS. */ maxextsiz = TLS_MAX_MSG_SIZE_V10_2; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) maxextsiz = min(maxextsiz, maxlen); #endif mrep = _rpc_copym_into_ext_pgs(mrep, maxextsiz); KRPC_VNET(svc_vc_tls_tx_msgcnt)++; KRPC_VNET(svc_vc_tls_tx_msgbytes) += len; } else { KRPC_VNET(svc_vc_tx_msgcnt)++; KRPC_VNET(svc_vc_tx_msgbytes) += len; } KRPC_CURVNET_RESTORE(); atomic_add_32(&xprt->xp_snd_cnt, len); /* * sosend consumes mreq. */ error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL, 0, curthread); if (!error) { atomic_add_rel_32(&xprt->xp_snt_cnt, len); if (seq) *seq = xprt->xp_snd_cnt; stat = TRUE; } else atomic_subtract_32(&xprt->xp_snd_cnt, len); } else { m_freem(mrep); } XDR_DESTROY(&xdrs); return (stat); } static bool_t svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr *addr, struct mbuf *m, uint32_t *seq) { struct ct_data *ct; XDR xdrs; struct mbuf *mrep; bool_t stat = TRUE; int error, maxextsiz; #ifdef KERN_TLS u_int maxlen; #endif /* * Leave space for record mark. 
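 * Here, as in svc_vc_reply() above, the mbuf is created with four bytes of
 * leading space reserved so that the later M_PREPEND() can place the record
 * mark without allocating.  A reply whose RPC payload is N bytes therefore
 * goes on the wire as 4 + N bytes, with the mark carrying 0x80000000 | N.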
*/ mrep = m_gethdr(M_WAITOK, MT_DATA); mrep->m_data += sizeof(uint32_t); xdrmbuf_create(&xdrs, mrep, XDR_ENCODE); if (msg->rm_reply.rp_stat == MSG_ACCEPTED && msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { if (!xdr_replymsg(&xdrs, msg)) stat = FALSE; else xdrmbuf_append(&xdrs, m); } else { stat = xdr_replymsg(&xdrs, msg); } if (stat) { m_fixhdr(mrep); /* * Prepend a record marker containing the reply length. */ M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK); *mtod(mrep, uint32_t *) = htonl(0x80000000 | (mrep->m_pkthdr.len - sizeof(uint32_t))); /* For RPC-over-TLS, copy mrep to a chain of ext_pgs. */ if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) { /* * Copy the mbuf chain to a chain of * ext_pgs mbuf(s) as required by KERN_TLS. */ maxextsiz = TLS_MAX_MSG_SIZE_V10_2; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) maxextsiz = min(maxextsiz, maxlen); #endif mrep = _rpc_copym_into_ext_pgs(mrep, maxextsiz); } sx_xlock(&xprt->xp_lock); ct = (struct ct_data *)xprt->xp_p2; if (ct != NULL) error = sosend(ct->ct_socket, NULL, NULL, mrep, NULL, 0, curthread); else error = EPIPE; sx_xunlock(&xprt->xp_lock); if (!error) { stat = TRUE; } } else { m_freem(mrep); } XDR_DESTROY(&xdrs); return (stat); } static bool_t svc_vc_null(void) { return (FALSE); } static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag) { SVCXPRT *xprt = (SVCXPRT *) arg; if (soreadable(xprt->xp_socket)) xprt_active(xprt); return (SU_OK); } static int svc_vc_rendezvous_soupcall(struct socket *head, void *arg, int waitflag) { SVCXPRT *xprt = (SVCXPRT *) arg; if (!TAILQ_EMPTY(&head->sol_comp)) xprt_active(xprt); return (SU_OK); } #if 0 /* * Get the effective UID of the sending process. Used by rpcbind, keyserv * and rpc.yppasswdd on AF_LOCAL. */ int __rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) { int sock, ret; gid_t egid; uid_t euid; struct sockaddr *sa; sock = transp->xp_fd; sa = (struct sockaddr *)transp->xp_rtaddr; if (sa->sa_family == AF_LOCAL) { ret = getpeereid(sock, &euid, &egid); if (ret == 0) *uid = euid; return (ret); } else return (-1); } #endif diff --git a/sys/sys/protosw.h b/sys/sys/protosw.h index 629e7570cb7e..eea231f82807 100644 --- a/sys/sys/protosw.h +++ b/sys/sys/protosw.h @@ -1,204 +1,204 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_PROTOSW_H_ #define _SYS_PROTOSW_H_ #include /* Forward declare these structures referenced from prototypes below. */ struct kaiocb; struct mbuf; struct thread; struct sockaddr; struct socket; struct sockopt; /*#ifdef _KERNEL*/ /* * Protocol switch table. * * Each protocol has a handle initializing one of these structures, * which is used for protocol-protocol and system-protocol communication. * * In retrospect, it would be a lot nicer to use an interface * similar to the vnode VOP interface. */ struct ifnet; struct stat; struct ucred; struct uio; /* USE THESE FOR YOUR PROTOTYPES ! */ typedef int pr_ctloutput_t(struct socket *, struct sockopt *); typedef int pr_setsbopt_t(struct socket *, struct sockopt *); typedef void pr_abort_t(struct socket *); typedef int pr_accept_t(struct socket *, struct sockaddr *); typedef int pr_attach_t(struct socket *, int, struct thread *); typedef int pr_bind_t(struct socket *, struct sockaddr *, struct thread *); typedef int pr_connect_t(struct socket *, struct sockaddr *, struct thread *); typedef int pr_connect2_t(struct socket *, struct socket *); typedef int pr_control_t(struct socket *, unsigned long, void *, struct ifnet *, struct thread *); typedef void pr_detach_t(struct socket *); typedef int pr_disconnect_t(struct socket *); typedef int pr_listen_t(struct socket *, int, struct thread *); -typedef int pr_peeraddr_t(struct socket *, struct sockaddr **); +typedef int pr_peeraddr_t(struct socket *, struct sockaddr *); typedef int pr_rcvd_t(struct socket *, int); typedef int pr_rcvoob_t(struct socket *, struct mbuf *, int); typedef enum { PRUS_OOB = 0x1, PRUS_EOF = 0x2, PRUS_MORETOCOME = 0x4, PRUS_NOTREADY = 0x8, PRUS_IPV6 = 0x10, } pr_send_flags_t; typedef int pr_send_t(struct socket *, int, struct mbuf *, struct sockaddr *, struct mbuf *, struct thread *); typedef int pr_ready_t(struct socket *, struct mbuf *, int); typedef int pr_sense_t(struct socket *, struct stat *); typedef int pr_shutdown_t(struct socket *); typedef int pr_flush_t(struct socket *, int); -typedef int pr_sockaddr_t(struct socket *, struct sockaddr **); +typedef int pr_sockaddr_t(struct socket *, struct sockaddr *); typedef int pr_sosend_t(struct socket *, struct sockaddr *, struct uio *, struct mbuf *, struct mbuf *, int, struct thread *); typedef int pr_soreceive_t(struct socket *, struct sockaddr **, struct uio *, struct mbuf **, struct mbuf **, int *); typedef int pr_sopoll_t(struct socket *, int, struct ucred *, struct thread *); typedef void pr_sosetlabel_t(struct socket *); typedef void pr_close_t(struct socket *); typedef int pr_bindat_t(int, struct socket *, struct sockaddr *, struct thread *); typedef int pr_connectat_t(int, struct socket *, struct sockaddr *, struct thread *); typedef int pr_aio_queue_t(struct socket *, struct kaiocb *); struct protosw { short pr_type; /* socket type used for */ short pr_protocol; /* protocol number */ short pr_flags; /* see below */ short pr_unused; struct domain *pr_domain; /* domain protocol a member of */ 
pr_soreceive_t *pr_soreceive; /* recv(2) */ pr_rcvd_t *pr_rcvd; /* soreceive_generic() if PR_WANTRCVD */ pr_sosend_t *pr_sosend; /* send(2) */ pr_send_t *pr_send; /* send(2) via sosend_generic() */ pr_ready_t *pr_ready; /* sendfile/ktls readyness */ pr_sopoll_t *pr_sopoll; /* poll(2) */ /* Cache line #2 */ pr_attach_t *pr_attach; /* creation: socreate(), sonewconn() */ pr_detach_t *pr_detach; /* destruction: sofree() */ pr_connect_t *pr_connect; /* connect(2) */ pr_disconnect_t *pr_disconnect; /* sodisconnect() */ pr_close_t *pr_close; /* close(2) */ pr_shutdown_t *pr_shutdown; /* shutdown(2) */ pr_abort_t *pr_abort; /* abrupt tear down: soabort() */ pr_aio_queue_t *pr_aio_queue; /* aio(9) */ /* Cache line #3 */ pr_bind_t *pr_bind; /* bind(2) */ pr_bindat_t *pr_bindat; /* bindat(2) */ pr_listen_t *pr_listen; /* listen(2) */ pr_accept_t *pr_accept; /* accept(2) */ pr_connectat_t *pr_connectat; /* connectat(2) */ pr_connect2_t *pr_connect2; /* socketpair(2) */ pr_control_t *pr_control; /* ioctl(2) */ pr_rcvoob_t *pr_rcvoob; /* soreceive_rcvoob() */ /* Cache line #4 */ pr_ctloutput_t *pr_ctloutput; /* control output (from above) */ pr_peeraddr_t *pr_peeraddr; /* getpeername(2) */ pr_sockaddr_t *pr_sockaddr; /* getsockname(2) */ pr_sense_t *pr_sense; /* stat(2) */ pr_flush_t *pr_flush; /* XXXGL: merge with pr_shutdown_t! */ pr_sosetlabel_t *pr_sosetlabel; /* MAC, XXXGL: remove */ pr_setsbopt_t *pr_setsbopt; /* Socket buffer ioctls */ }; /*#endif*/ /* * Values for pr_flags. * PR_ADDR requires PR_ATOMIC; * PR_ADDR and PR_CONNREQUIRED are mutually exclusive. * PR_IMPLOPCL means that the protocol allows sendto without prior connect, * and the protocol understands the MSG_EOF flag. The first property is * is only relevant if PR_CONNREQUIRED is set (otherwise sendto is allowed * anyhow). * PR_SOCKBUF requires protocol to initialize and destroy its socket buffers * in its pr_attach and pr_detach. */ #define PR_ATOMIC 0x01 /* exchange atomic messages only */ #define PR_ADDR 0x02 /* addresses given with messages */ #define PR_CONNREQUIRED 0x04 /* connection required by protocol */ #define PR_WANTRCVD 0x08 /* want PRU_RCVD calls */ #define PR_RIGHTS 0x10 /* passes capabilities */ #define PR_IMPLOPCL 0x20 /* implied open/close */ /* was PR_LASTHDR 0x40 enforce ipsec policy; last header */ #define PR_CAPATTACH 0x80 /* socket can attach in cap mode */ #define PR_SOCKBUF 0x100 /* private implementation of buffers */ /* * The arguments to ctloutput are: * (*protosw[].pr_ctloutput)(req, so, level, optname, optval, p); * req is one of the actions listed below, so is a (struct socket *), * level is an indication of which protocol layer the option is intended. * optname is a protocol dependent socket option request, * optval is a pointer to a mbuf-chain pointer, for value-return results. * The protocol is responsible for disposal of the mbuf chain *optval * if supplied, * the caller is responsible for any space held by *optval, when returned. * A non-zero return from ctloutput gives an * UNIX error number which should be passed to higher level software. */ #define PRCO_GETOPT 0 #define PRCO_SETOPT 1 #define PRCO_NCMDS 2 #ifdef PRCOREQUESTS char *prcorequests[] = { "GETOPT", "SETOPT", }; #endif #ifdef _KERNEL struct domain *pffinddomain(int family); struct protosw *pffindproto(int family, int type, int proto); int protosw_register(struct domain *, struct protosw *); int protosw_unregister(struct protosw *); /* Domains that are known to be avaliable for protosw_register(). 
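 *
 * On the pr_peeraddr_t / pr_sockaddr_t change above: both hooks now take a
 * caller-supplied struct sockaddr instead of returning an allocated copy,
 * which the old contract typically produced via sodupsockaddr() or
 * malloc(M_SONAME) and the caller then freed.  A minimal sketch of an
 * implementation under the new contract, with foo_pcb/foo_peer as purely
 * illustrative names and assuming the caller provides storage at least the
 * size of a struct sockaddr_storage, as the svc_vc code earlier in this
 * change does:
 *
 *	static int
 *	foo_peeraddr(struct socket *so, struct sockaddr *sa)
 *	{
 *		struct foo_pcb *pcb = so->so_pcb;
 *
 *		bcopy(&pcb->foo_peer, sa, pcb->foo_peer.sa_len);
 *		return (0);
 *	}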
*/ extern struct domain inetdomain; extern struct domain inet6domain; #endif #endif diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h index 005938aee08a..ae7baa87d202 100644 --- a/sys/sys/socketvar.h +++ b/sys/sys/socketvar.h @@ -1,599 +1,601 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_SOCKETVAR_H_ #define _SYS_SOCKETVAR_H_ /* * Socket generation count type. Also used in xinpcb, xtcpcb, xunpcb. */ typedef uint64_t so_gen_t; #if defined(_KERNEL) || defined(_WANT_SOCKET) #include /* for TAILQ macros */ #include /* for struct selinfo */ #include #include #include #include #include #ifdef _KERNEL #include #include #endif struct vnet; /* * Kernel structure per socket. * Contains send and receive buffer queues, * handle on protocol and pointer to protocol * private data and error information. */ typedef int so_upcall_t(struct socket *, void *, int); typedef void so_dtor_t(struct socket *); struct socket; enum socket_qstate { SQ_NONE = 0, SQ_INCOMP = 0x0800, /* on sol_incomp */ SQ_COMP = 0x1000, /* on sol_comp */ }; /*- * Locking key to struct socket: * (a) constant after allocation, no locking required. * (b) locked by SOCK_LOCK(so). * (cr) locked by SOCK_RECVBUF_LOCK(so) * (cs) locked by SOCK_SENDBUF_LOCK(so) * (e) locked by SOLISTEN_LOCK() of corresponding listening socket. * (f) not locked since integer reads/writes are atomic. * (g) used only as a sleep/wakeup address, no value. * (h) locked by global mutex so_global_mtx. 
* (k) locked by KTLS workqueue mutex */ TAILQ_HEAD(accept_queue, socket); struct socket { struct mtx so_lock; volatile u_int so_count; /* (b / refcount) */ struct selinfo so_rdsel; /* (b/cr) for so_rcv/so_comp */ struct selinfo so_wrsel; /* (b/cs) for so_snd */ int so_options; /* (b) from socket call, see socket.h */ short so_type; /* (a) generic type, see socket.h */ short so_state; /* (b) internal state flags SS_* */ void *so_pcb; /* protocol control block */ struct vnet *so_vnet; /* (a) network stack instance */ struct protosw *so_proto; /* (a) protocol handle */ short so_linger; /* time to linger close(2) */ short so_timeo; /* (g) connection timeout */ u_short so_error; /* (f) error affecting connection */ u_short so_rerror; /* (f) error affecting connection */ struct sigio *so_sigio; /* [sg] information for async I/O or out of band data (SIGURG) */ struct ucred *so_cred; /* (a) user credentials */ struct label *so_label; /* (b) MAC label for socket */ /* NB: generation count must not be first. */ so_gen_t so_gencnt; /* (h) generation count */ void *so_emuldata; /* (b) private data for emulators */ so_dtor_t *so_dtor; /* (b) optional destructor */ struct osd osd; /* Object Specific extensions */ /* * so_fibnum, so_user_cookie and friends can be used to attach * some user-specified metadata to a socket, which then can be * used by the kernel for various actions. * so_user_cookie is used by ipfw/dummynet. */ int so_fibnum; /* routing domain for this socket */ uint32_t so_user_cookie; int so_ts_clock; /* type of the clock used for timestamps */ uint32_t so_max_pacing_rate; /* (f) TX rate limit in bytes/s */ /* * Mutexes to prevent interleaving of socket I/O. These have to be * outside of the socket buffers in order to interlock with listen(2). */ struct sx so_snd_sx __aligned(CACHE_LINE_SIZE); struct mtx so_snd_mtx; struct sx so_rcv_sx __aligned(CACHE_LINE_SIZE); struct mtx so_rcv_mtx; union { /* Regular (data flow) socket. */ struct { /* (cr, cs) Receive and send buffers. */ struct sockbuf so_rcv, so_snd; /* (e) Our place on accept queue. */ TAILQ_ENTRY(socket) so_list; struct socket *so_listen; /* (b) */ enum socket_qstate so_qstate; /* (b) */ /* (b) cached MAC label for peer */ struct label *so_peerlabel; u_long so_oobmark; /* chars to oob mark */ /* (k) Our place on KTLS RX work queue. */ STAILQ_ENTRY(socket) so_ktls_rx_list; }; /* * Listening socket, where accepts occur, is so_listen in all * subsidiary sockets. If so_listen is NULL, socket is not * related to an accept. For a listening socket itself * sol_incomp queues partially completed connections, while * sol_comp is a queue of connections ready to be accepted. * If a connection is aborted and it has so_listen set, then * it has to be pulled out of either sol_incomp or sol_comp. * We allow connections to queue up based on current queue * lengths and limit on number of queued connections for this * socket. */ struct { /* (e) queue of partial unaccepted connections */ struct accept_queue sol_incomp; /* (e) queue of complete unaccepted connections */ struct accept_queue sol_comp; u_int sol_qlen; /* (e) sol_comp length */ u_int sol_incqlen; /* (e) sol_incomp length */ u_int sol_qlimit; /* (e) queue limit */ /* accept_filter(9) optional data */ struct accept_filter *sol_accept_filter; void *sol_accept_filter_arg; /* saved filter args */ char *sol_accept_filter_str; /* saved user args */ /* Optional upcall, for kernel socket. 
*/ so_upcall_t *sol_upcall; /* (e) */ void *sol_upcallarg; /* (e) */ /* Socket buffer parameters, to be copied to * dataflow sockets, accepted from this one. */ int sol_sbrcv_lowat; int sol_sbsnd_lowat; u_int sol_sbrcv_hiwat; u_int sol_sbsnd_hiwat; short sol_sbrcv_flags; short sol_sbsnd_flags; sbintime_t sol_sbrcv_timeo; sbintime_t sol_sbsnd_timeo; /* Information tracking listen queue overflows. */ struct timeval sol_lastover; /* (e) */ int sol_overcount; /* (e) */ }; }; }; #endif /* defined(_KERNEL) || defined(_WANT_SOCKET) */ /* * Socket state bits. * * Historically, these bits were all kept in the so_state field. * They are now split into separate, lock-specific fields. * so_state maintains basic socket state protected by the socket lock. * so_qstate holds information about the socket accept queues. * Each socket buffer also has a state field holding information * relevant to that socket buffer (can't send, rcv). * Many fields will be read without locks to improve performance and avoid * lock order issues. However, this approach must be used with caution. */ #define SS_ISCONNECTED 0x0002 /* socket connected to a peer */ #define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */ #define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */ #define SS_NBIO 0x0100 /* non-blocking ops */ #define SS_ASYNC 0x0200 /* async i/o notify */ #define SS_ISCONFIRMING 0x0400 /* deciding to accept connection req */ #define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */ #ifdef _KERNEL #define SOCK_MTX(so) (&(so)->so_lock) #define SOCK_LOCK(so) mtx_lock(&(so)->so_lock) #define SOCK_OWNED(so) mtx_owned(&(so)->so_lock) #define SOCK_UNLOCK(so) mtx_unlock(&(so)->so_lock) #define SOCK_LOCK_ASSERT(so) mtx_assert(&(so)->so_lock, MA_OWNED) #define SOCK_UNLOCK_ASSERT(so) mtx_assert(&(so)->so_lock, MA_NOTOWNED) #define SOLISTENING(sol) (((sol)->so_options & SO_ACCEPTCONN) != 0) #define SOLISTEN_LOCK(sol) do { \ mtx_lock(&(sol)->so_lock); \ KASSERT(SOLISTENING(sol), \ ("%s: %p not listening", __func__, (sol))); \ } while (0) #define SOLISTEN_TRYLOCK(sol) mtx_trylock(&(sol)->so_lock) #define SOLISTEN_UNLOCK(sol) do { \ KASSERT(SOLISTENING(sol), \ ("%s: %p not listening", __func__, (sol))); \ mtx_unlock(&(sol)->so_lock); \ } while (0) #define SOLISTEN_LOCK_ASSERT(sol) do { \ mtx_assert(&(sol)->so_lock, MA_OWNED); \ KASSERT(SOLISTENING(sol), \ ("%s: %p not listening", __func__, (sol))); \ } while (0) #define SOLISTEN_UNLOCK_ASSERT(sol) do { \ mtx_assert(&(sol)->so_lock, MA_NOTOWNED); \ KASSERT(SOLISTENING(sol), \ ("%s: %p not listening", __func__, (sol))); \ } while (0) /* * Socket buffer locks. These are strongly preferred over SOCKBUF_LOCK(sb) * macros, as we are moving towards protocol specific socket buffers. 
*/ #define SOCK_RECVBUF_MTX(so) \ (&(so)->so_rcv_mtx) #define SOCK_RECVBUF_LOCK(so) \ mtx_lock(SOCK_RECVBUF_MTX(so)) #define SOCK_RECVBUF_UNLOCK(so) \ mtx_unlock(SOCK_RECVBUF_MTX(so)) #define SOCK_RECVBUF_LOCK_ASSERT(so) \ mtx_assert(SOCK_RECVBUF_MTX(so), MA_OWNED) #define SOCK_RECVBUF_UNLOCK_ASSERT(so) \ mtx_assert(SOCK_RECVBUF_MTX(so), MA_NOTOWNED) #define SOCK_SENDBUF_MTX(so) \ (&(so)->so_snd_mtx) #define SOCK_SENDBUF_LOCK(so) \ mtx_lock(SOCK_SENDBUF_MTX(so)) #define SOCK_SENDBUF_UNLOCK(so) \ mtx_unlock(SOCK_SENDBUF_MTX(so)) #define SOCK_SENDBUF_LOCK_ASSERT(so) \ mtx_assert(SOCK_SENDBUF_MTX(so), MA_OWNED) #define SOCK_SENDBUF_UNLOCK_ASSERT(so) \ mtx_assert(SOCK_SENDBUF_MTX(so), MA_NOTOWNED) #define SOCK_BUF_LOCK(so, which) \ mtx_lock(soeventmtx(so, which)) #define SOCK_BUF_UNLOCK(so, which) \ mtx_unlock(soeventmtx(so, which)) #define SOCK_BUF_LOCK_ASSERT(so, which) \ mtx_assert(soeventmtx(so, which), MA_OWNED) #define SOCK_BUF_UNLOCK_ASSERT(so, which) \ mtx_assert(soeventmtx(so, which), MA_NOTOWNED) static inline struct sockbuf * sobuf(struct socket *so, const sb_which which) { return (which == SO_RCV ? &so->so_rcv : &so->so_snd); } static inline struct mtx * soeventmtx(struct socket *so, const sb_which which) { return (which == SO_RCV ? SOCK_RECVBUF_MTX(so) : SOCK_SENDBUF_MTX(so)); } /* * Macros for sockets and socket buffering. */ /* * Flags to soiolock(). */ #define SBL_WAIT 0x00000001 /* Wait if not immediately available. */ #define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */ #define SBL_VALID (SBL_WAIT | SBL_NOINTR) #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT) #define SOCK_IO_SEND_LOCK(so, flags) \ soiolock((so), &(so)->so_snd_sx, (flags)) #define SOCK_IO_SEND_UNLOCK(so) \ soiounlock(&(so)->so_snd_sx) #define SOCK_IO_SEND_OWNED(so) sx_xlocked(&(so)->so_snd_sx) #define SOCK_IO_RECV_LOCK(so, flags) \ soiolock((so), &(so)->so_rcv_sx, (flags)) #define SOCK_IO_RECV_UNLOCK(so) \ soiounlock(&(so)->so_rcv_sx) #define SOCK_IO_RECV_OWNED(so) sx_xlocked(&(so)->so_rcv_sx) /* do we have to send all at once on a socket? */ #define sosendallatonce(so) \ ((so)->so_proto->pr_flags & PR_ATOMIC) /* can we read something from so? */ #define soreadabledata(so) \ (sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat || \ (so)->so_error || (so)->so_rerror) #define soreadable(so) \ (soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE)) /* can we write something to so? */ #define sowriteable(so) \ ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \ (((so)->so_state&SS_ISCONNECTED) || \ ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \ ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \ (so)->so_error) /* * soref()/sorele() ref-count the socket structure. * soref() may be called without owning socket lock, but in that case a * caller must own something that holds socket, and so_count must be not 0. * Note that you must still explicitly close the socket, but the last ref * count will free the structure. */ #define soref(so) refcount_acquire(&(so)->so_count) #define sorele(so) do { \ SOCK_UNLOCK_ASSERT(so); \ if (!refcount_release_if_not_last(&(so)->so_count)) { \ SOCK_LOCK(so); \ sorele_locked(so); \ } \ } while (0) /* * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to * avoid a non-atomic test-and-wakeup. However, sowakeup is * responsible for releasing the lock if it is called. We unlock only * if we don't call into sowakeup. If any code is introduced that * directly invokes the underlying sowakeup() primitives, it must * maintain the same semantics. 
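 *
 * A typical protocol receive path follows this pattern when it appends
 * data and then wakes up readers; a sketch only, not code introduced by
 * this change:
 *
 *	SOCK_RECVBUF_LOCK(so);
 *	sbappendstream_locked(&so->so_rcv, m, 0);
 *	sorwakeup_locked(so);		(the wakeup path drops the lock)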
*/ #define sorwakeup(so) do { \ SOCK_RECVBUF_LOCK(so); \ sorwakeup_locked(so); \ } while (0) #define sowwakeup(so) do { \ SOCK_SENDBUF_LOCK(so); \ sowwakeup_locked(so); \ } while (0) struct accept_filter { char accf_name[16]; int (*accf_callback) (struct socket *so, void *arg, int waitflag); void * (*accf_create) (struct socket *so, char *arg); void (*accf_destroy) (struct socket *so); SLIST_ENTRY(accept_filter) accf_next; }; #define ACCEPT_FILTER_DEFINE(modname, filtname, cb, create, destroy, ver) \ static struct accept_filter modname##_filter = { \ .accf_name = filtname, \ .accf_callback = cb, \ .accf_create = create, \ .accf_destroy = destroy, \ }; \ static moduledata_t modname##_mod = { \ .name = __XSTRING(modname), \ .evhand = accept_filt_generic_mod_event, \ .priv = &modname##_filter, \ }; \ DECLARE_MODULE(modname, modname##_mod, SI_SUB_DRIVERS, \ SI_ORDER_MIDDLE); \ MODULE_VERSION(modname, ver) #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_ACCF); MALLOC_DECLARE(M_PCB); MALLOC_DECLARE(M_SONAME); #endif /* * Socket specific helper hook point identifiers * Do not leave holes in the sequence, hook registration is a loop. */ #define HHOOK_SOCKET_OPT 0 #define HHOOK_SOCKET_CREATE 1 #define HHOOK_SOCKET_RCV 2 #define HHOOK_SOCKET_SND 3 #define HHOOK_FILT_SOREAD 4 #define HHOOK_FILT_SOWRITE 5 #define HHOOK_SOCKET_CLOSE 6 #define HHOOK_SOCKET_LAST HHOOK_SOCKET_CLOSE struct socket_hhook_data { struct socket *so; struct mbuf *m; void *hctx; /* hook point specific data*/ int status; }; extern int maxsockets; extern u_long sb_max; extern so_gen_t so_gencnt; struct file; struct filecaps; struct filedesc; struct mbuf; struct sockaddr; struct ucred; struct uio; /* Return values for socket upcalls. */ #define SU_OK 0 #define SU_ISCONNECTED 1 /* * From uipc_socket and friends */ int getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr, size_t len); int getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp, struct filecaps *havecaps); int getsock(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp); void soabort(struct socket *so); int soaccept(struct socket *so, struct sockaddr *sa); +int sopeeraddr(struct socket *so, struct sockaddr *sa); +int sosockaddr(struct socket *so, struct sockaddr *sa); void soaio_enqueue(struct task *task); void soaio_rcv(void *context, int pending); void soaio_snd(void *context, int pending); int socheckuid(struct socket *so, uid_t uid); int sobind(struct socket *so, struct sockaddr *nam, struct thread *td); int sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td); int soclose(struct socket *so); int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td); int soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td); int soconnect2(struct socket *so1, struct socket *so2); int socreate(int dom, struct socket **aso, int type, int proto, struct ucred *cred, struct thread *td); int sodisconnect(struct socket *so); void sodtor_set(struct socket *, so_dtor_t *); struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags); void sohasoutofband(struct socket *so); int solisten(struct socket *so, int backlog, struct thread *td); void solisten_proto(struct socket *so, int backlog); void solisten_proto_abort(struct socket *so); int solisten_proto_check(struct socket *so); bool solisten_enqueue(struct socket *, int); int solisten_dequeue(struct socket *, struct socket **, int); struct socket * solisten_clone(struct socket *); struct socket * sonewconn(struct socket 
*head, int connstatus); struct socket * sopeeloff(struct socket *); int sopoll(struct socket *so, int events, struct ucred *active_cred, struct thread *td); int sopoll_generic(struct socket *so, int events, struct ucred *active_cred, struct thread *td); int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp); int soreceive_stream(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp); int soreceive_dgram(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp); int soreceive_generic(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp); void sorele_locked(struct socket *so); void sodealloc(struct socket *); int soreserve(struct socket *so, u_long sndcc, u_long rcvcc); void sorflush(struct socket *so); int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td); int sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *control, int flags, struct proc *); int sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td); int sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td); int soshutdown(struct socket *so, int how); void soupcall_clear(struct socket *, sb_which); void soupcall_set(struct socket *, sb_which, so_upcall_t, void *); void solisten_upcall_set(struct socket *, so_upcall_t, void *); void sorwakeup_locked(struct socket *); void sowwakeup_locked(struct socket *); void sowakeup_aio(struct socket *, sb_which); void solisten_wakeup(struct socket *); int selsocket(struct socket *so, int events, struct timeval *tv, struct thread *td); void soisconnected(struct socket *so); void soisconnecting(struct socket *so); void soisdisconnected(struct socket *so); void soisdisconnecting(struct socket *so); void socantrcvmore(struct socket *so); void socantrcvmore_locked(struct socket *so); void socantsendmore(struct socket *so); void socantsendmore_locked(struct socket *so); void soroverflow(struct socket *so); void soroverflow_locked(struct socket *so); int soiolock(struct socket *so, struct sx *sx, int flags); void soiounlock(struct sx *sx); /* * Accept filter functions (duh). */ int accept_filt_add(struct accept_filter *filt); int accept_filt_del(char *name); struct accept_filter *accept_filt_get(char *name); #ifdef ACCEPT_FILTER_MOD #ifdef SYSCTL_DECL SYSCTL_DECL(_net_inet_accf); #endif int accept_filt_generic_mod_event(module_t mod, int event, void *data); #endif #endif /* _KERNEL */ /* * Structure to export socket from kernel to utilities, via sysctl(3). 
*/ struct xsocket { ksize_t xso_len; /* length of this structure */ kvaddr_t xso_so; /* kernel address of struct socket */ kvaddr_t so_pcb; /* kernel address of struct inpcb */ uint64_t so_oobmark; int64_t so_spare64[8]; int32_t xso_protocol; int32_t xso_family; uint32_t so_qlen; uint32_t so_incqlen; uint32_t so_qlimit; pid_t so_pgid; uid_t so_uid; int32_t so_spare32[8]; int16_t so_type; int16_t so_options; int16_t so_linger; int16_t so_state; int16_t so_timeo; uint16_t so_error; struct xsockbuf { uint32_t sb_cc; uint32_t sb_hiwat; uint32_t sb_mbcnt; uint32_t sb_spare0; /* was sb_mcnt */ uint32_t sb_spare1; /* was sb_ccnt */ uint32_t sb_mbmax; int32_t sb_lowat; int32_t sb_timeo; int16_t sb_flags; } so_rcv, so_snd; }; #ifdef _KERNEL void sotoxsocket(struct socket *so, struct xsocket *xso); void sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb); #endif /* * Socket buffer state bits. Exported via libprocstat(3). */ #define SBS_CANTSENDMORE 0x0010 /* can't send more data to peer */ #define SBS_CANTRCVMORE 0x0020 /* can't receive more data from peer */ #define SBS_RCVATMARK 0x0040 /* at mark on input */ #endif /* !_SYS_SOCKETVAR_H_ */ diff --git a/sys/sys/syscallsubr.h b/sys/sys/syscallsubr.h index c28016b4b3f8..1eca098365e7 100644 --- a/sys/sys/syscallsubr.h +++ b/sys/sys/syscallsubr.h @@ -1,391 +1,389 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2002 Ian Dowse. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _SYS_SYSCALLSUBR_H_ #define _SYS_SYSCALLSUBR_H_ #include #include #include #include #include #include #include struct __wrusage; struct cpuset_copy_cb; struct file; struct filecaps; enum idtype; struct itimerval; struct image_args; struct jail; struct kevent; struct kevent_copyops; struct kld_file_stat; struct ksiginfo; struct mbuf; struct msghdr; struct msqid_ds; struct pollfd; struct ogetdirentries_args; struct rlimit; struct rusage; struct sched_param; struct sembuf; union semun; struct sockaddr; struct spacectl_range; struct stat; struct thr_param; struct timex; struct uio; struct vm_map; struct vmspace; typedef int (*mmap_check_fp_fn)(struct file *, int, int, int); struct mmap_req { vm_offset_t mr_hint; vm_size_t mr_len; int mr_prot; int mr_flags; int mr_fd; off_t mr_pos; mmap_check_fp_fn mr_check_fp_fn; }; uint64_t at2cnpflags(u_int at_flags, u_int mask); int kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen, size_t path_max); int kern_abort2(struct thread *td, const char *why, int nargs, void **uargs); int kern_accept(struct thread *td, int s, struct sockaddr *sa, struct file **fp); int kern_accept4(struct thread *td, int s, struct sockaddr *sa, int flags, struct file **fp); int kern_accessat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, int flags, int mode); int kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta); int kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa); int kern_break(struct thread *td, uintptr_t *addr); int kern_cap_ioctls_limit(struct thread *td, int fd, u_long *cmds, size_t ncmds); int kern_cap_rights_limit(struct thread *td, int fd, cap_rights_t *rights); int kern_chdir(struct thread *td, const char *path, enum uio_seg pathseg); int kern_clock_getcpuclockid2(struct thread *td, id_t id, int which, clockid_t *clk_id); int kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts); int kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats); int kern_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); int kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats); void kern_thread_cputime(struct thread *targettd, struct timespec *ats); void kern_process_cputime(struct proc *targetp, struct timespec *ats); int kern_close_range(struct thread *td, int flags, u_int lowfd, u_int highfd); int kern_close(struct thread *td, int fd); int kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa); int kern_copy_file_range(struct thread *td, int infd, off_t *inoffp, int outfd, off_t *outoffp, size_t len, unsigned int flags); int user_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb); int kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); int kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, cpuset_t *maskp); int user_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *maskp, const struct cpuset_copy_cb *cb); int kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp, const struct cpuset_copy_cb *cb); int kern_cpuset_setdomain(struct thread *td, cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, const domainset_t *maskp, int policy, const struct cpuset_copy_cb *cb); int kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); int kern_cpuset_setid(struct thread *td, cpuwhich_t which, id_t id, cpusetid_t setid); int kern_dup(struct thread *td, u_int mode, int flags, int old, int new); int kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p, struct vmspace *oldvmspace); int kern_extattr_delete_fd(struct thread *td, int fd, int attrnamespace, const char *attrname); int kern_extattr_delete_path(struct thread *td, const char *path, int attrnamespace, const char *attrname, int follow, enum uio_seg pathseg); int kern_extattr_get_fd(struct thread *td, int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); int kern_extattr_get_path(struct thread *td, const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes, int follow, enum uio_seg pathseg); int kern_extattr_list_fd(struct thread *td, int fd, int attrnamespace, struct uio *auiop); int kern_extattr_list_path(struct thread *td, const char *path, int attrnamespace, struct uio *auiop, int follow, enum uio_seg pathseg); int kern_extattr_set_fd(struct thread *td, int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); int kern_extattr_set_path(struct thread *td, const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes, int follow, enum uio_seg pathseg); int kern_fchmodat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, mode_t mode, int flag); int kern_fchownat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, int uid, int gid, int flag); int kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg); int kern_fcntl_freebsd(struct thread *td, int fd, int cmd, long arg); int kern_fhopen(struct thread *td, const struct fhandle *u_fhp, int flags); int kern_fhstat(struct thread *td, fhandle_t fh, struct stat *buf); int kern_fhstatfs(struct thread *td, fhandle_t fh, struct statfs *buf); int kern_fpathconf(struct thread *td, int fd, int name, long *valuep); int kern_freebsd11_getfsstat(struct thread *td, struct freebsd11_statfs *ubuf, long bufsize, int mode); int kern_fstat(struct thread *td, int fd, struct stat *sbp); int kern_fstatfs(struct thread *td, int fd, struct statfs *buf); int kern_fsync(struct thread *td, int fd, bool fullsync); int kern_ftruncate(struct thread *td, int fd, off_t length); int kern_futimes(struct thread *td, int fd, const struct timeval *tptr, enum uio_seg tptrseg); int kern_futimens(struct thread *td, int fd, const struct timespec *tptr, enum uio_seg tptrseg); int kern_getdirentries(struct thread *td, int fd, char *buf, size_t count, off_t *basep, ssize_t *residp, enum uio_seg bufseg); int kern_getfhat(struct thread *td, int flags, int fd, const char *path, enum uio_seg pathseg, fhandle_t *fhp, enum uio_seg fhseg); int kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize, size_t *countp, enum uio_seg bufseg, int mode); int kern_getitimer(struct thread *, u_int, struct itimerval *); int kern_getppid(struct thread *); -int kern_getpeername(struct thread *td, int fd, struct sockaddr **sa, - socklen_t *alen); +int kern_getpeername(struct thread *td, int fd, struct sockaddr *sa); int kern_getpriority(struct thread *td, int which, int who); int kern_getrusage(struct thread *td, int who, struct rusage *rup); int kern_getsid(struct thread *td, pid_t pid); -int 
kern_getsockname(struct thread *td, int fd, struct sockaddr **sa, - socklen_t *alen); +int kern_getsockname(struct thread *td, int fd, struct sockaddr *sa); int kern_getsockopt(struct thread *td, int s, int level, int name, void *optval, enum uio_seg valseg, socklen_t *valsize); int kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data); int kern_jail(struct thread *td, struct jail *j); int kern_jail_get(struct thread *td, struct uio *options, int flags); int kern_jail_set(struct thread *td, struct uio *options, int flags); int kern_kevent(struct thread *td, int fd, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout); int kern_kevent_anonymous(struct thread *td, int nevents, struct kevent_copyops *k_ops); int kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout); int kern_kill(struct thread *td, pid_t pid, int signum); int kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps); int kern_kldload(struct thread *td, const char *file, int *fileid); int kern_kldstat(struct thread *td, int fileid, struct kld_file_stat *stat); int kern_kldunload(struct thread *td, int fileid, int flags); int kern_linkat(struct thread *td, int fd1, int fd2, const char *path1, const char *path2, enum uio_seg segflg, int flag); int kern_listen(struct thread *td, int s, int backlog); int kern_lseek(struct thread *td, int fd, off_t offset, int whence); int kern_lutimes(struct thread *td, const char *path, enum uio_seg pathseg, const struct timeval *tptr, enum uio_seg tptrseg); int kern_madvise(struct thread *td, uintptr_t addr, size_t len, int behav); int kern_mincore(struct thread *td, uintptr_t addr, size_t len, char *vec); int kern_minherit(struct thread *td, uintptr_t addr, size_t len, int inherit); int kern_mkdirat(struct thread *td, int fd, const char *path, enum uio_seg segflg, int mode); int kern_membarrier(struct thread *td, int cmd, unsigned flags, int cpu_id); int kern_mkfifoat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, int mode); int kern_mknodat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, int mode, dev_t dev); int kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr, size_t len); int kern_mmap(struct thread *td, const struct mmap_req *mrp); int kern_mmap_racct_check(struct thread *td, struct vm_map *map, vm_size_t size); int kern_mmap_maxprot(struct proc *p, int prot); int kern_mprotect(struct thread *td, uintptr_t addr, size_t size, int prot, int flags); int kern_msgctl(struct thread *, int, int, struct msqid_ds *); int kern_msgrcv(struct thread *, int, void *, size_t, long, int, long *); int kern_msgsnd(struct thread *, int, const void *, size_t, int, long); int kern_msync(struct thread *td, uintptr_t addr, size_t size, int flags); int kern_munlock(struct thread *td, uintptr_t addr, size_t size); int kern_munmap(struct thread *td, uintptr_t addr, size_t size); int kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt); int kern_ntp_adjtime(struct thread *td, struct timex *ntv, int *retvalp); int kern_ogetdirentries(struct thread *td, struct ogetdirentries_args *uap, long *ploff); int kern_ommap(struct thread *td, uintptr_t hint, int len, int oprot, int oflags, int fd, long pos); int kern_openat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, int flags, int mode); int kern_pathconf(struct thread *td, const char *path, enum uio_seg pathseg, int name, u_long flags, 
long *valuep); int kern_pipe(struct thread *td, int fildes[2], int flags, struct filecaps *fcaps1, struct filecaps *fcaps2); int kern_poll(struct thread *td, struct pollfd *fds, u_int nfds, struct timespec *tsp, sigset_t *uset); int kern_poll_kfds(struct thread *td, struct pollfd *fds, u_int nfds, struct timespec *tsp, sigset_t *uset); bool kern_poll_maxfds(u_int nfds); int kern_posix_error(struct thread *td, int error); int kern_posix_fadvise(struct thread *td, int fd, off_t offset, off_t len, int advice); int kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len); int kern_fspacectl(struct thread *td, int fd, int cmd, const struct spacectl_range *, int flags, struct spacectl_range *); int kern_procctl(struct thread *td, enum idtype idtype, id_t id, int com, void *data); int kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset); int kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset); int kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tvp, sigset_t *uset, int abi_nfdbits); int kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data); int kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte, off_t offset); int kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset); int kern_readlinkat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, char *buf, enum uio_seg bufseg, size_t count); int kern_readv(struct thread *td, int fd, struct uio *auio); int kern_recvit(struct thread *td, int s, struct msghdr *mp, enum uio_seg fromseg, struct mbuf **controlp); int kern_renameat(struct thread *td, int oldfd, const char *old, int newfd, const char *new, enum uio_seg pathseg); int kern_frmdirat(struct thread *td, int dfd, const char *path, int fd, enum uio_seg pathseg, int flag); int kern_sched_getparam(struct thread *td, struct thread *targettd, struct sched_param *param); int kern_sched_getscheduler(struct thread *td, struct thread *targettd, int *policy); int kern_sched_setparam(struct thread *td, struct thread *targettd, struct sched_param *param); int kern_sched_setscheduler(struct thread *td, struct thread *targettd, int policy, struct sched_param *param); int kern_sched_rr_get_interval(struct thread *td, pid_t pid, struct timespec *ts); int kern_sched_rr_get_interval_td(struct thread *td, struct thread *targettd, struct timespec *ts); int kern_semctl(struct thread *td, int semid, int semnum, int cmd, union semun *arg, register_t *rval); int kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou, fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits); int kern_sendit(struct thread *td, int s, struct msghdr *mp, int flags, struct mbuf *control, enum uio_seg segflg); int kern_setgroups(struct thread *td, u_int ngrp, gid_t *groups); int kern_setitimer(struct thread *, u_int, struct itimerval *, struct itimerval *); int kern_setpriority(struct thread *td, int which, int who, int prio); int kern_setrlimit(struct thread *, u_int, struct rlimit *); int kern_setsockopt(struct thread *td, int s, int level, int name, const void *optval, enum uio_seg valseg, socklen_t valsize); int kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp); int kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode, struct filecaps *fcaps); int kern_shm_open2(struct thread *td, const char *path, int flags, mode_t mode, int shmflags, struct filecaps *fcaps, const char *name); int kern_shmat(struct thread *td, 
int shmid, const void *shmaddr, int shmflg); int kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz); int kern_shutdown(struct thread *td, int s, int how); int kern_sigaction(struct thread *td, int sig, const struct sigaction *act, struct sigaction *oact, int flags); int kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss); int kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset, int flags); int kern_sigsuspend(struct thread *td, sigset_t mask); int kern_sigtimedwait(struct thread *td, sigset_t waitset, struct ksiginfo *ksi, struct timespec *timeout); int kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value); int kern_socket(struct thread *td, int domain, int type, int protocol); int kern_statat(struct thread *td, int flag, int fd, const char *path, enum uio_seg pathseg, struct stat *sbp); int kern_specialfd(struct thread *td, int type, void *arg); int kern_statfs(struct thread *td, const char *path, enum uio_seg pathseg, struct statfs *buf); int kern_symlinkat(struct thread *td, const char *path1, int fd, const char *path2, enum uio_seg segflg); int kern_sync(struct thread *td); int kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp, int *timerid, int preset_id); int kern_ktimer_delete(struct thread *, int); int kern_ktimer_settime(struct thread *td, int timer_id, int flags, struct itimerspec *val, struct itimerspec *oval); int kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val); int kern_ktimer_getoverrun(struct thread *td, int timer_id); int kern_semop(struct thread *td, int usemid, struct sembuf *usops, size_t nsops, struct timespec *timeout); int kern_thr_alloc(struct proc *, int pages, struct thread **); int kern_thr_exit(struct thread *td); int kern_thr_new(struct thread *td, struct thr_param *param); int kern_thr_suspend(struct thread *td, struct timespec *tsp); int kern_truncate(struct thread *td, const char *path, enum uio_seg pathseg, off_t length); int kern_funlinkat(struct thread *td, int dfd, const char *path, int fd, enum uio_seg pathseg, int flag, ino_t oldinum); int kern_utimesat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, const struct timeval *tptr, enum uio_seg tptrseg); int kern_utimensat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, const struct timespec *tptr, enum uio_seg tptrseg, int flag); int kern_wait(struct thread *td, pid_t pid, int *status, int options, struct rusage *rup); int kern_wait6(struct thread *td, enum idtype idtype, id_t id, int *status, int options, struct __wrusage *wrup, siginfo_t *sip); int kern_writev(struct thread *td, int fd, struct uio *auio); int kern_socketpair(struct thread *td, int domain, int type, int protocol, int *rsv); int kern_unmount(struct thread *td, const char *path, int flags); /* flags for kern_sigaction */ #define KSA_OSIGSET 0x0001 /* uses osigact_t */ #define KSA_FREEBSD4 0x0002 /* uses ucontext4 */ struct freebsd11_dirent; int freebsd11_kern_getdirentries(struct thread *td, int fd, char *ubuf, u_int count, long *basep, void (*func)(struct freebsd11_dirent *)); #endif /* !_SYS_SYSCALLSUBR_H_ */
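For reference, a caller-side sketch of the kern_getpeername()/kern_getsockname() contract implied by the new prototypes above: the kernel no longer allocates and returns a sockaddr, the caller supplies the storage (typically a struct sockaddr_storage with ss_len pre-set to the buffer size, as svc_vc_rendezvous_recv() does earlier in this change) and then copies out at most what userland asked for. The user_getpeername() wrapper and its truncation handling below are illustrative only, not part of this change:

static int
user_getpeername(struct thread *td, int fd, struct sockaddr *uasa,
    socklen_t *ualen)
{
	struct sockaddr_storage ss = { .ss_len = sizeof(ss) };
	socklen_t inlen, outlen;
	int error;

	error = copyin(ualen, &inlen, sizeof(inlen));
	if (error != 0)
		return (error);

	error = kern_getpeername(td, fd, (struct sockaddr *)&ss);
	if (error != 0)
		return (error);

	/* Report the full address length, copy out at most what fits. */
	outlen = ss.ss_len;
	error = copyout(&ss, uasa, min(inlen, outlen));
	if (error == 0)
		error = copyout(&outlen, ualen, sizeof(outlen));
	return (error);
}

The same caller-provided-storage pattern applies to the sopeeraddr() and sosockaddr() helpers added to socketvar.h above.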