Index: projects/nfs-over-tls/sys/rpc/clnt_vc.c =================================================================== --- projects/nfs-over-tls/sys/rpc/clnt_vc.c (revision 363117) +++ projects/nfs-over-tls/sys/rpc/clnt_vc.c (revision 363118) @@ -1,1329 +1,1334 @@ /* $NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)clnt_tcp.c 2.2 88/08/01 4.0 RPCSRC"; static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro"; #endif #include __FBSDID("$FreeBSD$"); /* * clnt_tcp.c, Implements a TCP/IP based, client side RPC. * * Copyright (C) 1984, Sun Microsystems, Inc. * * TCP based RPC supports 'batched calls'. * A sequence of calls may be batched-up in a send buffer. The rpc call * return immediately to the client even though the call was not necessarily * sent. The batching occurs if the results' xdr routine is NULL (0) AND * the rpc timeout value is zero (see clnt.h, rpc). * * Clients should NOT casually batch calls that in fact return results; that is, * the server side should be aware that a call is batched and not produce any * return message. Batched calls that produce many result messages can * deadlock (netlock) the client and the server.... * * Now go hang yourself. 
*/ #include "opt_kern_tls.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct cmessage { struct cmsghdr cmsg; struct cmsgcred cmcred; }; static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *, rpcproc_t, struct mbuf *, struct mbuf **, struct timeval); static void clnt_vc_geterr(CLIENT *, struct rpc_err *); static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *); static void clnt_vc_abort(CLIENT *); static bool_t clnt_vc_control(CLIENT *, u_int, void *); static void clnt_vc_close(CLIENT *); static void clnt_vc_destroy(CLIENT *); static bool_t time_not_ok(struct timeval *); static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag); static void clnt_vc_dotlsupcall(void *data); static struct clnt_ops clnt_vc_ops = { .cl_call = clnt_vc_call, .cl_abort = clnt_vc_abort, .cl_geterr = clnt_vc_geterr, .cl_freeres = clnt_vc_freeres, .cl_close = clnt_vc_close, .cl_destroy = clnt_vc_destroy, .cl_control = clnt_vc_control }; static void clnt_vc_upcallsdone(struct ct_data *); static int fake_wchan; /* * Create a client handle for a connection. * Default options are set, which the user can change using clnt_control()'s. * The rpc/vc package does buffering similar to stdio, so the client * must pick send and receive buffer sizes, 0 => use the default. * NB: fd is copied into a private area. * NB: The rpch->cl_auth is set null authentication. Caller may wish to * set this something more useful. 
* * fd should be an open socket */ CLIENT * clnt_vc_create( struct socket *so, /* open file descriptor */ struct sockaddr *raddr, /* servers address */ const rpcprog_t prog, /* program number */ const rpcvers_t vers, /* version number */ size_t sendsz, /* buffer recv size */ size_t recvsz, /* buffer send size */ int intrflag) /* interruptible */ { CLIENT *cl; /* client handle */ struct ct_data *ct = NULL; /* client handle */ struct timeval now; struct rpc_msg call_msg; static uint32_t disrupt; struct __rpc_sockinfo si; XDR xdrs; int error, interrupted, one = 1, sleep_flag; struct sockopt sopt; if (disrupt == 0) disrupt = (uint32_t)(long)raddr; cl = (CLIENT *)mem_alloc(sizeof (*cl)); ct = (struct ct_data *)mem_alloc(sizeof (*ct)); mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF); ct->ct_threads = 0; ct->ct_closing = FALSE; ct->ct_closed = FALSE; ct->ct_upcallrefs = 0; ct->ct_rcvstate = RPCRCVSTATE_NORMAL; if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) { error = soconnect(so, raddr, curthread); SOCK_LOCK(so); interrupted = 0; sleep_flag = PSOCK; if (intrflag != 0) sleep_flag |= PCATCH; while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { error = msleep(&so->so_timeo, SOCK_MTX(so), sleep_flag, "connec", 0); if (error) { if (error == EINTR || error == ERESTART) interrupted = 1; break; } } if (error == 0) { error = so->so_error; so->so_error = 0; } SOCK_UNLOCK(so); if (error) { if (!interrupted) so->so_state &= ~SS_ISCONNECTING; rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = error; goto err; } } if (!__rpc_socket2sockinfo(so, &si)) { goto err; } if (so->so_proto->pr_flags & PR_CONNREQUIRED) { bzero(&sopt, sizeof(sopt)); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_KEEPALIVE; sopt.sopt_val = &one; sopt.sopt_valsize = sizeof(one); sosetopt(so, &sopt); } if (so->so_proto->pr_protocol == IPPROTO_TCP) { bzero(&sopt, sizeof(sopt)); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = IPPROTO_TCP; 
sopt.sopt_name = TCP_NODELAY; sopt.sopt_val = &one; sopt.sopt_valsize = sizeof(one); sosetopt(so, &sopt); } ct->ct_closeit = FALSE; /* * Set up private data struct */ ct->ct_socket = so; ct->ct_wait.tv_sec = -1; ct->ct_wait.tv_usec = -1; memcpy(&ct->ct_addr, raddr, raddr->sa_len); /* * Initialize call message */ getmicrotime(&now); ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now); call_msg.rm_xid = ct->ct_xid; call_msg.rm_direction = CALL; call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION; call_msg.rm_call.cb_prog = (uint32_t)prog; call_msg.rm_call.cb_vers = (uint32_t)vers; /* * pre-serialize the static part of the call msg and stash it away */ xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE, XDR_ENCODE); if (! xdr_callhdr(&xdrs, &call_msg)) { if (ct->ct_closeit) { soclose(ct->ct_socket); } goto err; } ct->ct_mpos = XDR_GETPOS(&xdrs); XDR_DESTROY(&xdrs); ct->ct_waitchan = "rpcrecv"; ct->ct_waitflag = 0; /* * Create a client handle which uses xdrrec for serialization * and authnone for authentication. 
*/ sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz); recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz); error = soreserve(ct->ct_socket, sendsz, recvsz); if (error != 0) { if (ct->ct_closeit) { soclose(ct->ct_socket); } goto err; } cl->cl_refs = 1; cl->cl_ops = &clnt_vc_ops; cl->cl_private = ct; cl->cl_auth = authnone_create(); SOCKBUF_LOCK(&ct->ct_socket->so_rcv); soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct); SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv); ct->ct_raw = NULL; ct->ct_record = NULL; ct->ct_record_resid = 0; ct->ct_sslrefno = 0; TAILQ_INIT(&ct->ct_pending); return (cl); err: mtx_destroy(&ct->ct_lock); mem_free(ct, sizeof (struct ct_data)); mem_free(cl, sizeof (CLIENT)); return ((CLIENT *)NULL); } static enum clnt_stat clnt_vc_call( CLIENT *cl, /* client handle */ struct rpc_callextra *ext, /* call metadata */ rpcproc_t proc, /* procedure number */ struct mbuf *args, /* pointer to args */ struct mbuf **resultsp, /* pointer to results */ struct timeval utimeout) { struct ct_data *ct = (struct ct_data *) cl->cl_private; AUTH *auth; struct rpc_err *errp; enum clnt_stat stat; XDR xdrs; struct rpc_msg reply_msg; bool_t ok; int nrefreshes = 2; /* number of times to refresh cred */ struct timeval timeout; uint32_t xid; struct mbuf *mreq = NULL, *results; struct ct_request *cr; int error, maxextsiz, trycnt; #ifdef KERN_TLS u_int maxlen; #endif cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK); mtx_lock(&ct->ct_lock); if (ct->ct_closing || ct->ct_closed) { mtx_unlock(&ct->ct_lock); free(cr, M_RPC); return (RPC_CANTSEND); } ct->ct_threads++; if (ext) { auth = ext->rc_auth; errp = &ext->rc_err; } else { auth = cl->cl_auth; errp = &ct->ct_error; } cr->cr_mrep = NULL; cr->cr_error = 0; if (ct->ct_wait.tv_usec == -1) { timeout = utimeout; /* use supplied timeout */ } else { timeout = ct->ct_wait; /* use default timeout */ } /* * After 15sec of looping, allow it to return RPC_CANTSEND, which will * cause the clnt_reconnect layer to 
create a new TCP connection. */ trycnt = 15 * hz; call_again: mtx_assert(&ct->ct_lock, MA_OWNED); if (ct->ct_closing || ct->ct_closed) { ct->ct_threads--; wakeup(ct); mtx_unlock(&ct->ct_lock); free(cr, M_RPC); return (RPC_CANTSEND); } ct->ct_xid++; xid = ct->ct_xid; mtx_unlock(&ct->ct_lock); /* * Leave space to pre-pend the record mark. */ mreq = m_gethdr(M_WAITOK, MT_DATA); mreq->m_data += sizeof(uint32_t); KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN, ("RPC header too big")); bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos); mreq->m_len = ct->ct_mpos; /* * The XID is the first thing in the request. */ *mtod(mreq, uint32_t *) = htonl(xid); xdrmbuf_create(&xdrs, mreq, XDR_ENCODE); errp->re_status = stat = RPC_SUCCESS; if ((! XDR_PUTINT32(&xdrs, &proc)) || (! AUTH_MARSHALL(auth, xid, &xdrs, m_copym(args, 0, M_COPYALL, M_WAITOK)))) { errp->re_status = stat = RPC_CANTENCODEARGS; mtx_lock(&ct->ct_lock); goto out; } mreq->m_pkthdr.len = m_length(mreq, NULL); /* * Prepend a record marker containing the packet length. */ M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK); *mtod(mreq, uint32_t *) = htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t))); cr->cr_xid = xid; mtx_lock(&ct->ct_lock); /* * Check to see if the other end has already started to close down * the connection. The upcall will have set ct_error.re_status * to RPC_CANTRECV if this is the case. * If the other end starts to close down the connection after this * point, it will be detected later when cr_error is checked, * since the request is in the ct_pending queue. */ if (ct->ct_error.re_status == RPC_CANTRECV) { if (errp != &ct->ct_error) { errp->re_errno = ct->ct_error.re_errno; errp->re_status = RPC_CANTRECV; } stat = RPC_CANTRECV; goto out; } /* For TLS, wait for an upcall to be done, as required. 
*/ while ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL | RPCRCVSTATE_NONAPPDATA)) == 0) msleep(&ct->ct_rcvstate, &ct->ct_lock, 0, "rpcrcvst", hz); TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link); mtx_unlock(&ct->ct_lock); if (ct->ct_sslrefno != 0) { /* * Copy the mbuf chain to a chain of ext_pgs mbuf(s) * as required by KERN_TLS. */ maxextsiz = TLS_MAX_MSG_SIZE_V10_2; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) maxextsiz = min(maxextsiz, maxlen); #endif mreq = _rpc_copym_into_ext_pgs(mreq, maxextsiz); } /* * sosend consumes mreq. */ error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread); mreq = NULL; if (error == EMSGSIZE || (error == ERESTART && (ct->ct_waitflag & PCATCH) == 0 && trycnt-- > 0)) { SOCKBUF_LOCK(&ct->ct_socket->so_snd); sbwait(&ct->ct_socket->so_snd); SOCKBUF_UNLOCK(&ct->ct_socket->so_snd); AUTH_VALIDATE(auth, xid, NULL, NULL); mtx_lock(&ct->ct_lock); TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); /* Sleep for 1 clock tick before trying the sosend() again. */ msleep(&fake_wchan, &ct->ct_lock, 0, "rpclpsnd", 1); printf("TRY AGAIN!!\n"); goto call_again; } reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL; reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf; reply_msg.acpted_rply.ar_verf.oa_length = 0; reply_msg.acpted_rply.ar_results.where = NULL; reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void; mtx_lock(&ct->ct_lock); if (error) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_errno = error; errp->re_status = stat = RPC_CANTSEND; goto out; } /* * Check to see if we got an upcall while waiting for the * lock. In both these cases, the request has been removed * from ct->ct_pending. 
*/ if (cr->cr_error) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_errno = cr->cr_error; errp->re_status = stat = RPC_CANTRECV; goto out; } if (cr->cr_mrep) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); goto got_reply; } /* * Hack to provide rpc-based message passing */ if (timeout.tv_sec == 0 && timeout.tv_usec == 0) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_status = stat = RPC_TIMEDOUT; goto out; } error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan, tvtohz(&timeout)); TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); if (error) { /* * The sleep returned an error so our request is still * on the list. Turn the error code into an * appropriate client status. */ errp->re_errno = error; switch (error) { case EINTR: stat = RPC_INTR; break; case EWOULDBLOCK: stat = RPC_TIMEDOUT; break; default: stat = RPC_CANTRECV; } errp->re_status = stat; goto out; } else { /* * We were woken up by the upcall. If the * upcall had a receive error, report that, * otherwise we have a reply. */ if (cr->cr_error) { errp->re_errno = cr->cr_error; errp->re_status = stat = RPC_CANTRECV; goto out; } } got_reply: /* * Now decode and validate the response. We need to drop the * lock since xdr_replymsg may end up sleeping in malloc. 
*/ mtx_unlock(&ct->ct_lock); if (ext && ext->rc_feedback) ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg); xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE); ok = xdr_replymsg(&xdrs, &reply_msg); cr->cr_mrep = NULL; if (ok) { if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) && (reply_msg.acpted_rply.ar_stat == SUCCESS)) errp->re_status = stat = RPC_SUCCESS; else stat = _seterr_reply(&reply_msg, errp); if (stat == RPC_SUCCESS) { results = xdrmbuf_getall(&xdrs); if (!AUTH_VALIDATE(auth, xid, &reply_msg.acpted_rply.ar_verf, &results)) { errp->re_status = stat = RPC_AUTHERROR; errp->re_why = AUTH_INVALIDRESP; } else { KASSERT(results, ("auth validated but no result")); *resultsp = results; } } /* end successful completion */ /* * If unsuccessful AND error is an authentication error * then refresh credentials and try again, else break */ else if (stat == RPC_AUTHERROR) /* maybe our credentials need to be refreshed ... */ if (nrefreshes > 0 && AUTH_REFRESH(auth, &reply_msg)) { nrefreshes--; XDR_DESTROY(&xdrs); mtx_lock(&ct->ct_lock); goto call_again; } /* end of unsuccessful completion */ } /* end of valid reply message */ else { errp->re_status = stat = RPC_CANTDECODERES; } XDR_DESTROY(&xdrs); mtx_lock(&ct->ct_lock); out: mtx_assert(&ct->ct_lock, MA_OWNED); KASSERT(stat != RPC_SUCCESS || *resultsp, ("RPC_SUCCESS without reply")); if (mreq) m_freem(mreq); if (cr->cr_mrep) m_freem(cr->cr_mrep); ct->ct_threads--; if (ct->ct_closing) wakeup(ct); mtx_unlock(&ct->ct_lock); if (auth && stat != RPC_SUCCESS) AUTH_VALIDATE(auth, xid, NULL, NULL); free(cr, M_RPC); return (stat); } static void clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp) { struct ct_data *ct = (struct ct_data *) cl->cl_private; *errp = ct->ct_error; } static bool_t clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr) { XDR xdrs; bool_t dummy; xdrs.x_op = XDR_FREE; dummy = (*xdr_res)(&xdrs, res_ptr); return (dummy); } /*ARGSUSED*/ static void clnt_vc_abort(CLIENT *cl) { } static bool_t 
clnt_vc_control(CLIENT *cl, u_int request, void *info) { struct ct_data *ct = (struct ct_data *)cl->cl_private; void *infop = info; SVCXPRT *xprt; uint64_t *p; int error; static u_int thrdnum = 0; mtx_lock(&ct->ct_lock); switch (request) { case CLSET_FD_CLOSE: ct->ct_closeit = TRUE; mtx_unlock(&ct->ct_lock); return (TRUE); case CLSET_FD_NCLOSE: ct->ct_closeit = FALSE; mtx_unlock(&ct->ct_lock); return (TRUE); default: break; } /* for other requests which use info */ if (info == NULL) { mtx_unlock(&ct->ct_lock); return (FALSE); } switch (request) { case CLSET_TIMEOUT: if (time_not_ok((struct timeval *)info)) { mtx_unlock(&ct->ct_lock); return (FALSE); } ct->ct_wait = *(struct timeval *)infop; break; case CLGET_TIMEOUT: *(struct timeval *)infop = ct->ct_wait; break; case CLGET_SERVER_ADDR: (void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len); break; case CLGET_SVC_ADDR: /* * Slightly different semantics to userland - we use * sockaddr instead of netbuf. */ memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len); break; case CLSET_SVC_ADDR: /* set to new address */ mtx_unlock(&ct->ct_lock); return (FALSE); case CLGET_XID: *(uint32_t *)info = ct->ct_xid; break; case CLSET_XID: /* This will set the xid of the NEXT call */ /* decrement by 1 as clnt_vc_call() increments once */ ct->ct_xid = *(uint32_t *)info - 1; break; case CLGET_VERS: /* * This RELIES on the information that, in the call body, * the version number field is the fifth field from the * beginning of the RPC header. MUST be changed if the * call_struct is changed */ *(uint32_t *)info = ntohl(*(uint32_t *)(void *)(ct->ct_mcallc + 4 * BYTES_PER_XDR_UNIT)); break; case CLSET_VERS: *(uint32_t *)(void *)(ct->ct_mcallc + 4 * BYTES_PER_XDR_UNIT) = htonl(*(uint32_t *)info); break; case CLGET_PROG: /* * This RELIES on the information that, in the call body, * the program number field is the fourth field from the * beginning of the RPC header. 
MUST be changed if the * call_struct is changed */ *(uint32_t *)info = ntohl(*(uint32_t *)(void *)(ct->ct_mcallc + 3 * BYTES_PER_XDR_UNIT)); break; case CLSET_PROG: *(uint32_t *)(void *)(ct->ct_mcallc + 3 * BYTES_PER_XDR_UNIT) = htonl(*(uint32_t *)info); break; case CLSET_WAITCHAN: ct->ct_waitchan = (const char *)info; break; case CLGET_WAITCHAN: *(const char **) info = ct->ct_waitchan; break; case CLSET_INTERRUPTIBLE: if (*(int *) info) ct->ct_waitflag = PCATCH; else ct->ct_waitflag = 0; break; case CLGET_INTERRUPTIBLE: if (ct->ct_waitflag) *(int *) info = TRUE; else *(int *) info = FALSE; break; case CLSET_BACKCHANNEL: xprt = (SVCXPRT *)info; if (ct->ct_backchannelxprt == NULL) { xprt->xp_p2 = ct; if (ct->ct_sslrefno != 0) xprt->xp_tls = RPCTLS_FLAGS_HANDSHAKE; ct->ct_backchannelxprt = xprt; printf("backch tls=0x%x xprt=%p\n", xprt->xp_tls, xprt); } break; case CLSET_TLS: p = (uint64_t *)info; ct->ct_sslsec = *p++; ct->ct_sslusec = *p++; ct->ct_sslrefno = *p; if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) { mtx_unlock(&ct->ct_lock); /* Start the kthread that handles upcalls. 
*/ error = kthread_add(clnt_vc_dotlsupcall, ct, NULL, NULL, 0, 0, "krpctls%u", thrdnum++); if (error != 0) panic("Can't add KRPC thread error %d", error); } else mtx_unlock(&ct->ct_lock); return (TRUE); case CLSET_BLOCKRCV: if (*(int *) info) { ct->ct_rcvstate &= ~RPCRCVSTATE_NORMAL; ct->ct_rcvstate |= RPCRCVSTATE_TLSHANDSHAKE; } else { ct->ct_rcvstate &= ~RPCRCVSTATE_TLSHANDSHAKE; ct->ct_rcvstate |= RPCRCVSTATE_NORMAL; } break; default: mtx_unlock(&ct->ct_lock); return (FALSE); } mtx_unlock(&ct->ct_lock); return (TRUE); } static void clnt_vc_close(CLIENT *cl) { struct ct_data *ct = (struct ct_data *) cl->cl_private; struct ct_request *cr; mtx_lock(&ct->ct_lock); if (ct->ct_closed) { mtx_unlock(&ct->ct_lock); return; } if (ct->ct_closing) { while (ct->ct_closing) msleep(ct, &ct->ct_lock, 0, "rpcclose", 0); KASSERT(ct->ct_closed, ("client should be closed")); mtx_unlock(&ct->ct_lock); return; } if (ct->ct_socket) { ct->ct_closing = TRUE; mtx_unlock(&ct->ct_lock); SOCKBUF_LOCK(&ct->ct_socket->so_rcv); if (ct->ct_socket->so_rcv.sb_upcall != NULL) { soupcall_clear(ct->ct_socket, SO_RCV); clnt_vc_upcallsdone(ct); } SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv); /* * Abort any pending requests and wait until everyone * has finished with clnt_vc_call. */ mtx_lock(&ct->ct_lock); TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) { cr->cr_xid = 0; cr->cr_error = ESHUTDOWN; wakeup(cr); } while (ct->ct_threads) msleep(ct, &ct->ct_lock, 0, "rpcclose", 0); } ct->ct_closing = FALSE; ct->ct_closed = TRUE; wakeup(&ct->ct_sslrefno); mtx_unlock(&ct->ct_lock); wakeup(ct); } static void clnt_vc_destroy(CLIENT *cl) { struct ct_data *ct = (struct ct_data *) cl->cl_private; struct socket *so = NULL; SVCXPRT *xprt; enum clnt_stat stat; uint32_t reterr; clnt_vc_close(cl); mtx_lock(&ct->ct_lock); xprt = ct->ct_backchannelxprt; ct->ct_backchannelxprt = NULL; if (xprt != NULL) { mtx_unlock(&ct->ct_lock); /* To avoid a LOR. 
*/ sx_xlock(&xprt->xp_lock); mtx_lock(&ct->ct_lock); xprt->xp_p2 = NULL; sx_xunlock(&xprt->xp_lock); } if (ct->ct_socket) { if (ct->ct_closeit) { so = ct->ct_socket; } } /* Wait for the upcall kthread to terminate. */ while ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLTHREAD) != 0) msleep(&ct->ct_sslrefno, &ct->ct_lock, 0, "clntvccl", hz); mtx_unlock(&ct->ct_lock); mtx_destroy(&ct->ct_lock); if (so) { if (ct->ct_sslrefno != 0) { /* * If the TLS handshake is in progress, the upcall * will fail, but the socket should be closed by the * daemon, since the connect upcall has just failed. */ if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) { /* * If the upcall fails, the socket has * probably been closed via the rpctlscd * daemon having crashed or been * restarted, so ignore return stat. */ stat = rpctls_cl_disconnect(ct->ct_sslsec, ct->ct_sslusec, ct->ct_sslrefno, &reterr); } + /* Must sorele() to get rid of reference. */ + CURVNET_SET(so->so_vnet); + SOCK_LOCK(so); + sorele(so); + CURVNET_RESTORE(); } else { soshutdown(so, SHUT_WR); soclose(so); } } m_freem(ct->ct_record); m_freem(ct->ct_raw); mem_free(ct, sizeof(struct ct_data)); if (cl->cl_netid && cl->cl_netid[0]) mem_free(cl->cl_netid, strlen(cl->cl_netid) +1); if (cl->cl_tp && cl->cl_tp[0]) mem_free(cl->cl_tp, strlen(cl->cl_tp) +1); mem_free(cl, sizeof(CLIENT)); } /* * Make sure that the time is not garbage. -1 value is disallowed. 
* Note this is different from time_not_ok in clnt_dg.c */ static bool_t time_not_ok(struct timeval *t) { return (t->tv_sec <= -1 || t->tv_sec > 100000000 || t->tv_usec <= -1 || t->tv_usec > 1000000); } int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag) { struct ct_data *ct = (struct ct_data *) arg; struct uio uio; struct mbuf *m, *m2, **ctrlp; struct ct_request *cr; int error, rcvflag, foundreq; uint32_t xid_plus_direction[2], header; SVCXPRT *xprt; struct cf_conn *cd; u_int rawlen; struct cmsghdr *cmsg; struct tls_get_record tgr; /* * RPC-over-TLS needs to block reception during * upcalls since the upcall will be doing I/O on * the socket via openssl library calls. */ mtx_lock(&ct->ct_lock); if ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL | RPCRCVSTATE_NONAPPDATA)) == 0) { /* Mark that a socket upcall needs to be done. */ if ((ct->ct_rcvstate & (RPCRCVSTATE_UPCALLNEEDED | RPCRCVSTATE_UPCALLINPROG)) != 0) ct->ct_rcvstate |= RPCRCVSTATE_SOUPCALLNEEDED; mtx_unlock(&ct->ct_lock); return (SU_OK); } mtx_unlock(&ct->ct_lock); /* * If another thread is already here, it must be in * soreceive(), so just return to avoid races with it. * ct_upcallrefs is protected by the SOCKBUF_LOCK(), * which is held in this function, except when * soreceive() is called. */ if (ct->ct_upcallrefs > 0) return (SU_OK); ct->ct_upcallrefs++; /* * Read as much as possible off the socket and link it * onto ct_raw. */ for (;;) { uio.uio_resid = 1000000000; uio.uio_td = curthread; m2 = m = NULL; rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK; if (ct->ct_sslrefno != 0 && (ct->ct_rcvstate & RPCRCVSTATE_NORMAL) != 0) { rcvflag |= MSG_TLSAPPDATA; ctrlp = NULL; } else ctrlp = &m2; SOCKBUF_UNLOCK(&so->so_rcv); error = soreceive(so, NULL, &uio, &m, ctrlp, &rcvflag); SOCKBUF_LOCK(&so->so_rcv); if (error == EWOULDBLOCK) { /* * We must re-test for readability after * taking the lock to protect us in the case * where a new packet arrives on the socket * after our call to soreceive fails with * EWOULDBLOCK. 
*/ if (so->so_rcv.sb_lowat > 1) printf("lowat=%d\n", so->so_rcv.sb_lowat); error = 0; if (!soreadable(so)) break; continue; } if (error == 0 && m == NULL) { /* * We must have got EOF trying * to read from the stream. */ error = ECONNRESET; } /* * A return of ENXIO indicates that there is a * non-application data record at the head of the * socket's receive queue, for TLS connections. * This record needs to be handled in userland * via an SSL_read() call, so do an upcall to the daemon. */ if (ct->ct_sslrefno != 0 && error == ENXIO) { /* Disable reception, marking an upcall needed. */ mtx_lock(&ct->ct_lock); ct->ct_rcvstate |= RPCRCVSTATE_UPCALLNEEDED; /* * If an upcall in needed, wake up the kthread * that runs clnt_vc_dotlsupcall(). */ wakeup(&ct->ct_sslrefno); mtx_unlock(&ct->ct_lock); printf("Mark upcallneeded\n"); break; } if (error != 0) break; /* Process any record header(s). */ if (m2 != NULL) { cmsg = mtod(m2, struct cmsghdr *); if (cmsg->cmsg_type == TLS_GET_RECORD && cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) { memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr)); /* * This should have been handled by * setting RPCRCVSTATE_UPCALLNEEDED in * ct_rcvstate but if not, all we can do * is toss it away. */ if (tgr.tls_type != TLS_RLTYPE_APP) { printf("Got weird type=%d\n", tgr.tls_type); m_freem(m); m_free(m2); mtx_lock(&ct->ct_lock); ct->ct_rcvstate &= ~RPCRCVSTATE_NONAPPDATA; ct->ct_rcvstate |= RPCRCVSTATE_NORMAL; mtx_unlock(&ct->ct_lock); continue; } } m_free(m2); } if (ct->ct_raw != NULL) m_last(ct->ct_raw)->m_next = m; else ct->ct_raw = m; } rawlen = m_length(ct->ct_raw, NULL); /* Now, process as much of ct_raw as possible. */ for (;;) { /* * If ct_record_resid is zero, we are waiting for a * record mark. 
*/ if (ct->ct_record_resid == 0) { if (rawlen < sizeof(uint32_t)) break; m_copydata(ct->ct_raw, 0, sizeof(uint32_t), (char *)&header); header = ntohl(header); ct->ct_record_resid = header & 0x7fffffff; ct->ct_record_eor = ((header & 0x80000000) != 0); m_adj(ct->ct_raw, sizeof(uint32_t)); rawlen -= sizeof(uint32_t); } else { /* * Move as much of the record as possible to * ct_record. */ if (rawlen == 0) break; if (rawlen <= ct->ct_record_resid) { if (ct->ct_record != NULL) m_last(ct->ct_record)->m_next = ct->ct_raw; else ct->ct_record = ct->ct_raw; ct->ct_raw = NULL; ct->ct_record_resid -= rawlen; rawlen = 0; } else { m = m_split(ct->ct_raw, ct->ct_record_resid, M_NOWAIT); if (m == NULL) { printf("soup m_split returned NULL\n"); break; } if (ct->ct_record != NULL) m_last(ct->ct_record)->m_next = ct->ct_raw; else ct->ct_record = ct->ct_raw; rawlen -= ct->ct_record_resid; ct->ct_record_resid = 0; ct->ct_raw = m; } if (ct->ct_record_resid > 0) break; /* * If we have the entire record, see if we can * match it to a request. */ if (ct->ct_record_eor) { /* * The XID is in the first uint32_t of * the reply and the message direction * is the second one. */ if (ct->ct_record->m_len < sizeof(xid_plus_direction) && m_length(ct->ct_record, NULL) < sizeof(xid_plus_direction)) { /* * What to do now? * The data in the TCP stream is * corrupted such that there is no * valid RPC message to parse. * I think it best to close this * connection and allow * clnt_reconnect_call() to try * and establish a new one. */ printf("clnt_vc_soupcall: " "connection data corrupted\n"); error = ECONNRESET; goto wakeup_all; } m_copydata(ct->ct_record, 0, sizeof(xid_plus_direction), (char *)xid_plus_direction); xid_plus_direction[0] = ntohl(xid_plus_direction[0]); xid_plus_direction[1] = ntohl(xid_plus_direction[1]); /* Check message direction. */ if (xid_plus_direction[1] == CALL) { printf("Got backchannel callback\n"); /* This is a backchannel request. 
*/ mtx_lock(&ct->ct_lock); xprt = ct->ct_backchannelxprt; printf("backxprt=%p\n", xprt); if (xprt == NULL) { mtx_unlock(&ct->ct_lock); /* Just throw it away. */ m_freem(ct->ct_record); ct->ct_record = NULL; } else { cd = (struct cf_conn *) xprt->xp_p1; m2 = cd->mreq; /* * The requests are chained * in the m_nextpkt list. */ while (m2 != NULL && m2->m_nextpkt != NULL) /* Find end of list. */ m2 = m2->m_nextpkt; if (m2 != NULL) m2->m_nextpkt = ct->ct_record; else cd->mreq = ct->ct_record; ct->ct_record->m_nextpkt = NULL; ct->ct_record = NULL; xprt_active(xprt); mtx_unlock(&ct->ct_lock); } } else { mtx_lock(&ct->ct_lock); foundreq = 0; TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) { if (cr->cr_xid == xid_plus_direction[0]) { /* * This one * matches. We leave * the reply mbuf in * cr->cr_mrep. Set * the XID to zero so * that we will ignore * any duplicated * replies. */ cr->cr_xid = 0; cr->cr_mrep = ct->ct_record; cr->cr_error = 0; foundreq = 1; wakeup(cr); break; } } mtx_unlock(&ct->ct_lock); if (!foundreq) m_freem(ct->ct_record); ct->ct_record = NULL; } } } } if (error != 0) { wakeup_all: /* * This socket is broken, so mark that it cannot * receive and fail all RPCs waiting for a reply * on it, so that they will be retried on a new * TCP connection created by clnt_reconnect_X(). */ mtx_lock(&ct->ct_lock); ct->ct_error.re_status = RPC_CANTRECV; ct->ct_error.re_errno = error; TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) { cr->cr_error = error; wakeup(cr); } mtx_unlock(&ct->ct_lock); } ct->ct_upcallrefs--; if (ct->ct_upcallrefs < 0) panic("rpcvc upcall refcnt"); if (ct->ct_upcallrefs == 0) wakeup(&ct->ct_upcallrefs); return (SU_OK); } /* * Wait for all upcalls in progress to complete. */ static void clnt_vc_upcallsdone(struct ct_data *ct) { SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv); while (ct->ct_upcallrefs > 0) (void) msleep(&ct->ct_upcallrefs, SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0); } /* * Do a TLS upcall to the rpctlscd daemon, as required. 
* This function runs as a kthread. */ static void clnt_vc_dotlsupcall(void *data) { struct ct_data *ct = (struct ct_data *)data; enum clnt_stat ret; uint32_t reterr; mtx_lock(&ct->ct_lock); ct->ct_rcvstate |= RPCRCVSTATE_UPCALLTHREAD; while (!ct->ct_closed) { if ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLNEEDED) != 0) { ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLNEEDED; ct->ct_rcvstate |= RPCRCVSTATE_UPCALLINPROG; if (ct->ct_sslrefno != 0 && ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) { mtx_unlock(&ct->ct_lock); printf("at handlerecord\n"); ret = rpctls_cl_handlerecord(ct->ct_sslsec, ct->ct_sslusec, ct->ct_sslrefno, &reterr); printf("aft handlerecord=%d\n", ret); mtx_lock(&ct->ct_lock); } ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLINPROG; if (ret == RPC_SUCCESS && reterr == RPCTLSERR_OK) ct->ct_rcvstate |= RPCRCVSTATE_NORMAL; else ct->ct_rcvstate |= RPCRCVSTATE_NONAPPDATA; wakeup(&ct->ct_rcvstate); } if ((ct->ct_rcvstate & RPCRCVSTATE_SOUPCALLNEEDED) != 0) { ct->ct_rcvstate &= ~RPCRCVSTATE_SOUPCALLNEEDED; mtx_unlock(&ct->ct_lock); SOCKBUF_LOCK(&ct->ct_socket->so_rcv); clnt_vc_soupcall(ct->ct_socket, ct, M_NOWAIT); SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv); mtx_lock(&ct->ct_lock); } msleep(&ct->ct_sslrefno, &ct->ct_lock, 0, "clntvcdu", hz); } ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLTHREAD; wakeup(&ct->ct_sslrefno); mtx_unlock(&ct->ct_lock); kthread_exit(); } Index: projects/nfs-over-tls/sys/rpc/rpcsec_tls/rpctls_impl.c =================================================================== --- projects/nfs-over-tls/sys/rpc/rpcsec_tls/rpctls_impl.c (revision 363117) +++ projects/nfs-over-tls/sys/rpc/rpcsec_tls/rpctls_impl.c (revision 363118) @@ -1,773 +1,775 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Modified from the kernel GSSAPI code for RPC-over-TLS. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_kern_tls.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rpctlscd.h" #include "rpctlssd.h" extern struct fileops badfileops; /* * Syscall hooks */ static struct syscall_helper_data rpctls_syscalls[] = { SYSCALL_INIT_HELPER(rpctls_syscall), SYSCALL_INIT_LAST }; static CLIENT *rpctls_connect_handle; static struct mtx rpctls_connect_lock; static struct socket *rpctls_connect_so = NULL; static CLIENT *rpctls_connect_cl = NULL; static CLIENT *rpctls_server_handle; static struct mtx rpctls_server_lock; static struct socket *rpctls_server_so = NULL; static SVCXPRT *rpctls_server_xprt = NULL; static struct opaque_auth rpctls_null_verf; static CLIENT *rpctls_connect_client(void); static CLIENT *rpctls_server_client(void); static enum clnt_stat rpctls_server(SVCXPRT *xprt, struct socket *so, uint32_t *flags, uint64_t *sslp, uid_t *uid, int *ngrps, gid_t **gids); int rpctls_init(void) { int error; error = syscall_helper_register(rpctls_syscalls, SY_THR_STATIC_KLD); if (error != 0) { printf("rpctls_init: cannot register syscall\n"); return (error); } mtx_init(&rpctls_connect_lock, "rpctls_connect_lock", NULL, MTX_DEF); mtx_init(&rpctls_server_lock, "rpctls_server_lock", NULL, MTX_DEF); rpctls_null_verf.oa_flavor = AUTH_NULL; rpctls_null_verf.oa_base = RPCTLS_START_STRING; rpctls_null_verf.oa_length = strlen(RPCTLS_START_STRING); return (0); } int sys_rpctls_syscall(struct thread *td, struct rpctls_syscall_args *uap) { struct sockaddr_un sun; struct netconfig *nconf; struct file *fp; struct socket *so; SVCXPRT *xprt; char path[MAXPATHLEN]; int fd = -1, error, try_count; CLIENT *cl, *oldcl, *concl; uint64_t ssl[3]; struct timeval timeo; #ifdef KERN_TLS u_int maxlen; #endif printf("in rpctls syscall\n"); error = priv_check(td, PRIV_NFS_DAEMON); printf("aft 
priv_check=%d\n", error); if (error != 0) return (error); switch (uap->op) { case RPCTLS_SYSC_CLSETPATH: error = copyinstr(uap->path, path, sizeof(path), NULL); printf("setting err=%d path=%s\n", error, path); if (error == 0) { error = ENXIO; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) error = 0; #endif } if (error == 0 && (strlen(path) + 1 > sizeof(sun.sun_path) || strlen(path) == 0)) error = EINVAL; cl = NULL; if (error == 0) { sun.sun_family = AF_LOCAL; strlcpy(sun.sun_path, path, sizeof(sun.sun_path)); sun.sun_len = SUN_LEN(&sun); nconf = getnetconfigent("local"); cl = clnt_reconnect_create(nconf, (struct sockaddr *)&sun, RPCTLSCD, RPCTLSCDVERS, RPC_MAXDATASIZE, RPC_MAXDATASIZE); printf("got cl=%p\n", cl); /* * The number of retries defaults to INT_MAX, which * effectively means an infinite, uninterruptable loop. * Set the try_count to 1 so that no retries of the * RPC occur. Since it is an upcall to a local daemon, * requests should not be lost and doing one of these * RPCs multiple times is not correct. * If the server is not working correctly, the * daemon can get stuck in SSL_connect() trying * to read data from the socket during the upcall. * Set a timeout (currently 15sec) and assume the * daemon is hung when the timeout occurs. 
*/ if (cl != NULL) { try_count = 1; CLNT_CONTROL(cl, CLSET_RETRIES, &try_count); timeo.tv_sec = 15; timeo.tv_usec = 0; CLNT_CONTROL(cl, CLSET_TIMEOUT, &timeo); } else error = EINVAL; } mtx_lock(&rpctls_connect_lock); oldcl = rpctls_connect_handle; rpctls_connect_handle = cl; mtx_unlock(&rpctls_connect_lock); printf("cl=%p oldcl=%p\n", cl, oldcl); if (oldcl != NULL) { CLNT_CLOSE(oldcl); CLNT_RELEASE(oldcl); } break; case RPCTLS_SYSC_SRVSETPATH: error = copyinstr(uap->path, path, sizeof(path), NULL); printf("setting err=%d path=%s\n", error, path); if (error == 0) { error = ENXIO; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) error = 0; #endif } if (error == 0 && (strlen(path) + 1 > sizeof(sun.sun_path) || strlen(path) == 0)) error = EINVAL; cl = NULL; if (error == 0) { sun.sun_family = AF_LOCAL; strlcpy(sun.sun_path, path, sizeof(sun.sun_path)); sun.sun_len = SUN_LEN(&sun); nconf = getnetconfigent("local"); cl = clnt_reconnect_create(nconf, (struct sockaddr *)&sun, RPCTLSSD, RPCTLSSDVERS, RPC_MAXDATASIZE, RPC_MAXDATASIZE); printf("got cl=%p\n", cl); /* * The number of retries defaults to INT_MAX, which * effectively means an infinite, uninterruptable loop. * Set the try_count to 1 so that no retries of the * RPC occur. Since it is an upcall to a local daemon, * requests should not be lost and doing one of these * RPCs multiple times is not correct. * Set a timeout (currently 15sec) and assume that * the daemon is hung if a timeout occurs. 
*/ if (cl != NULL) { try_count = 1; CLNT_CONTROL(cl, CLSET_RETRIES, &try_count); timeo.tv_sec = 15; timeo.tv_usec = 0; CLNT_CONTROL(cl, CLSET_TIMEOUT, &timeo); } else error = EINVAL; } mtx_lock(&rpctls_server_lock); oldcl = rpctls_server_handle; rpctls_server_handle = cl; mtx_unlock(&rpctls_server_lock); printf("srvcl=%p oldcl=%p\n", cl, oldcl); if (oldcl != NULL) { CLNT_CLOSE(oldcl); CLNT_RELEASE(oldcl); } break; case RPCTLS_SYSC_CLSHUTDOWN: mtx_lock(&rpctls_connect_lock); oldcl = rpctls_connect_handle; rpctls_connect_handle = NULL; mtx_unlock(&rpctls_connect_lock); printf("clshutd oldcl=%p\n", oldcl); if (oldcl != NULL) { CLNT_CLOSE(oldcl); CLNT_RELEASE(oldcl); } break; case RPCTLS_SYSC_SRVSHUTDOWN: mtx_lock(&rpctls_server_lock); oldcl = rpctls_server_handle; rpctls_server_handle = NULL; mtx_unlock(&rpctls_server_lock); printf("srvshutd oldcl=%p\n", oldcl); if (oldcl != NULL) { CLNT_CLOSE(oldcl); CLNT_RELEASE(oldcl); } break; case RPCTLS_SYSC_CLSOCKET: printf("In connect\n"); mtx_lock(&rpctls_connect_lock); so = rpctls_connect_so; rpctls_connect_so = NULL; concl = rpctls_connect_cl; rpctls_connect_cl = NULL; mtx_unlock(&rpctls_connect_lock); if (so != NULL) { error = falloc(td, &fp, &fd, 0); printf("falloc=%d fd=%d\n", error, fd); if (error == 0) { /* * Set ssl refno so that clnt_vc_destroy() will * not close the socket and will leave that for * the daemon to do. */ + soref(so); ssl[0] = ssl[1] = 0; ssl[2] = RPCTLS_REFNO_HANDSHAKE; CLNT_CONTROL(concl, CLSET_TLS, ssl); finit(fp, FREAD | FWRITE, DTYPE_SOCKET, so, &socketops); fdrop(fp, td); /* Drop fp reference. 
*/ td->td_retval[0] = fd; } } else error = EPERM; printf("clsocket err=%d fd=%d\n", error, fd); break; case RPCTLS_SYSC_SRVSOCKET: printf("In srvconnect\n"); mtx_lock(&rpctls_server_lock); so = rpctls_server_so; rpctls_server_so = NULL; xprt = rpctls_server_xprt; rpctls_server_xprt = NULL; mtx_unlock(&rpctls_server_lock); if (so != NULL) { error = falloc(td, &fp, &fd, 0); printf("falloc=%d fd=%d\n", error, fd); if (error == 0) { /* * Once this file descriptor is associated * with the socket, it cannot be closed by * the server side krpc code (svc_vc.c). */ + soref(so); sx_xlock(&xprt->xp_lock); xprt->xp_tls = RPCTLS_FLAGS_HANDSHFAIL; sx_xunlock(&xprt->xp_lock); finit(fp, FREAD | FWRITE, DTYPE_SOCKET, so, &socketops); fdrop(fp, td); /* Drop fp reference. */ td->td_retval[0] = fd; } } else error = EPERM; printf("srvsocket err=%d fd=%d\n", error, fd); break; default: error = EINVAL; } return (error); } /* * Acquire the rpctls_connect_handle and return it with a reference count, * if it is available. */ static CLIENT * rpctls_connect_client(void) { CLIENT *cl; mtx_lock(&rpctls_connect_lock); cl = rpctls_connect_handle; if (cl != NULL) CLNT_ACQUIRE(cl); mtx_unlock(&rpctls_connect_lock); return (cl); } /* * Acquire the rpctls_server_handle and return it with a reference count, * if it is available. */ static CLIENT * rpctls_server_client(void) { CLIENT *cl; mtx_lock(&rpctls_server_lock); cl = rpctls_server_handle; if (cl != NULL) CLNT_ACQUIRE(cl); mtx_unlock(&rpctls_server_lock); return (cl); } /* Do an upcall for a new socket connect using TLS. 
*/ enum clnt_stat rpctls_connect(CLIENT *newclient, struct socket *so, uint64_t *sslp, uint32_t *reterr) { struct rpctlscd_connect_res res; struct rpc_callextra ext; struct timeval utimeout; enum clnt_stat stat; CLIENT *cl; int val; static bool rpctls_connect_busy = false; printf("In rpctls_connect\n"); cl = rpctls_connect_client(); printf("connect_client=%p\n", cl); if (cl == NULL) return (RPC_AUTHERROR); /* First, do the AUTH_TLS NULL RPC. */ memset(&ext, 0, sizeof(ext)); utimeout.tv_sec = 30; utimeout.tv_usec = 0; ext.rc_auth = authtls_create(); printf("authtls=%p\n", ext.rc_auth); stat = clnt_call_private(newclient, &ext, NULLPROC, (xdrproc_t)xdr_void, NULL, (xdrproc_t)xdr_void, NULL, utimeout); printf("aft NULLRPC=%d\n", stat); AUTH_DESTROY(ext.rc_auth); if (stat == RPC_AUTHERROR) return (stat); if (stat != RPC_SUCCESS) return (RPC_SYSTEMERROR); /* Serialize the connect upcalls. */ mtx_lock(&rpctls_connect_lock); while (rpctls_connect_busy) msleep(&rpctls_connect_busy, &rpctls_connect_lock, PVFS, "rtlscn", 0); rpctls_connect_busy = true; rpctls_connect_so = so; rpctls_connect_cl = newclient; mtx_unlock(&rpctls_connect_lock); printf("rpctls_conect so=%p\n", so); /* Temporarily block reception during the handshake upcall. */ val = 1; CLNT_CONTROL(newclient, CLSET_BLOCKRCV, &val); /* Do the connect handshake upcall. */ stat = rpctlscd_connect_1(NULL, &res, cl); printf("aft connect upcall=%d\n", stat); if (stat == RPC_SUCCESS) { *reterr = res.reterr; if (res.reterr == 0) { *sslp++ = res.sec; *sslp++ = res.usec; *sslp = res.ssl; } } else if (stat == RPC_TIMEDOUT) { /* * Do a shutdown on the socket, since the daemon is probably * stuck in SSL_connect() trying to read the socket. * Do not soclose() the socket, since the daemon will close() * the socket after SSL_connect() returns an error. */ soshutdown(so, SHUT_RD); printf("did soshutdown rd\n"); } CLNT_RELEASE(cl); /* Unblock reception. 
*/ val = 0; CLNT_CONTROL(newclient, CLSET_BLOCKRCV, &val); /* Once the upcall is done, the daemon is done with the fp and so. */ mtx_lock(&rpctls_connect_lock); rpctls_connect_so = NULL; rpctls_connect_cl = NULL; rpctls_connect_busy = false; wakeup(&rpctls_connect_busy); mtx_unlock(&rpctls_connect_lock); printf("aft wakeup\n"); return (stat); } /* Do an upcall to handle an non-application data record using TLS. */ enum clnt_stat rpctls_cl_handlerecord(uint64_t sec, uint64_t usec, uint64_t ssl, uint32_t *reterr) { struct rpctlscd_handlerecord_arg arg; struct rpctlscd_handlerecord_res res; enum clnt_stat stat; CLIENT *cl; printf("In rpctls_cl_handlerecord\n"); cl = rpctls_connect_client(); printf("handlerecord_client=%p\n", cl); if (cl == NULL) { *reterr = RPCTLSERR_NOSSL; return (RPC_SUCCESS); } /* Do the handlerecord upcall. */ arg.sec = sec; arg.usec = usec; arg.ssl = ssl; stat = rpctlscd_handlerecord_1(&arg, &res, cl); printf("aft handlerecord upcall=%d\n", stat); CLNT_RELEASE(cl); if (stat == RPC_SUCCESS) *reterr = res.reterr; return (stat); } enum clnt_stat rpctls_srv_handlerecord(uint64_t sec, uint64_t usec, uint64_t ssl, uint32_t *reterr) { struct rpctlssd_handlerecord_arg arg; struct rpctlssd_handlerecord_res res; enum clnt_stat stat; CLIENT *cl; printf("In rpctls_srv_handlerecord\n"); cl = rpctls_server_client(); printf("srv handlerecord_client=%p\n", cl); if (cl == NULL) { *reterr = RPCTLSERR_NOSSL; return (RPC_SUCCESS); } /* Do the handlerecord upcall. */ arg.sec = sec; arg.usec = usec; arg.ssl = ssl; stat = rpctlssd_handlerecord_1(&arg, &res, cl); printf("aft srv handlerecord upcall=%d\n", stat); CLNT_RELEASE(cl); if (stat == RPC_SUCCESS) *reterr = res.reterr; return (stat); } /* Do an upcall to shut down a socket using TLS. 
*/ enum clnt_stat rpctls_cl_disconnect(uint64_t sec, uint64_t usec, uint64_t ssl, uint32_t *reterr) { struct rpctlscd_disconnect_arg arg; struct rpctlscd_disconnect_res res; enum clnt_stat stat; CLIENT *cl; printf("In rpctls_cl_disconnect\n"); cl = rpctls_connect_client(); printf("disconnect_client=%p\n", cl); if (cl == NULL) { *reterr = RPCTLSERR_NOSSL; return (RPC_SUCCESS); } /* Do the disconnect upcall. */ arg.sec = sec; arg.usec = usec; arg.ssl = ssl; stat = rpctlscd_disconnect_1(&arg, &res, cl); printf("aft disconnect upcall=%d\n", stat); CLNT_RELEASE(cl); if (stat == RPC_SUCCESS) *reterr = res.reterr; return (stat); } enum clnt_stat rpctls_srv_disconnect(uint64_t sec, uint64_t usec, uint64_t ssl, uint32_t *reterr) { struct rpctlssd_disconnect_arg arg; struct rpctlssd_disconnect_res res; enum clnt_stat stat; CLIENT *cl; printf("In rpctls_srv_disconnect\n"); cl = rpctls_server_client(); printf("srv disconnect_client=%p\n", cl); if (cl == NULL) { *reterr = RPCTLSERR_NOSSL; return (RPC_SUCCESS); } /* Do the disconnect upcall. */ arg.sec = sec; arg.usec = usec; arg.ssl = ssl; stat = rpctlssd_disconnect_1(&arg, &res, cl); printf("aft srv disconnect upcall=%d\n", stat); CLNT_RELEASE(cl); if (stat == RPC_SUCCESS) *reterr = res.reterr; return (stat); } /* Do an upcall for a new server socket using TLS. */ static enum clnt_stat rpctls_server(SVCXPRT *xprt, struct socket *so, uint32_t *flags, uint64_t *sslp, uid_t *uid, int *ngrps, gid_t **gids) { enum clnt_stat stat; CLIENT *cl; struct rpctlssd_connect_res res; gid_t *gidp; uint32_t *gidv; int i; static bool rpctls_server_busy = false; printf("In rpctls_server\n"); cl = rpctls_server_client(); printf("server_client=%p\n", cl); if (cl == NULL) return (RPC_SYSTEMERROR); /* Serialize the server upcalls. 
*/ mtx_lock(&rpctls_server_lock); while (rpctls_server_busy) msleep(&rpctls_server_busy, &rpctls_server_lock, PVFS, "rtlssn", 0); rpctls_server_busy = true; rpctls_server_so = so; rpctls_server_xprt = xprt; mtx_unlock(&rpctls_server_lock); printf("rpctls_conect so=%p\n", so); /* Do the server upcall. */ stat = rpctlssd_connect_1(NULL, &res, cl); if (stat == RPC_SUCCESS) { *flags = res.flags; *sslp++ = res.sec; *sslp++ = res.usec; *sslp = res.ssl; if ((*flags & (RPCTLS_FLAGS_CERTUSER | RPCTLS_FLAGS_DISABLED)) == RPCTLS_FLAGS_CERTUSER) { *ngrps = res.gid.gid_len; *uid = res.uid; *gids = gidp = mem_alloc(*ngrps * sizeof(gid_t)); gidv = res.gid.gid_val; printf("got uid=%d ngrps=%d gidv=%p gids=%p\n", *uid, *ngrps, gidv, gids); for (i = 0; i < *ngrps; i++) *gidp++ = *gidv++; } } else if (stat == RPC_TIMEDOUT) { /* * Do a shutdown on the socket, since the daemon is probably * stuck in SSL_accept() trying to read the socket. * Do not soclose() the socket, since the daemon will close() * the socket after SSL_accept() returns an error. */ soshutdown(so, SHUT_RD); printf("did soshutdown rd\n"); } printf("aft server upcall stat=%d flags=0x%x\n", stat, res.flags); CLNT_RELEASE(cl); /* Once the upcall is done, the daemon is done with the fp and so. */ mtx_lock(&rpctls_server_lock); rpctls_server_so = NULL; rpctls_server_xprt = NULL; rpctls_server_busy = false; wakeup(&rpctls_server_busy); mtx_unlock(&rpctls_server_lock); printf("aft wakeup\n"); return (stat); } /* * Handle the NULL RPC with authentication flavor of AUTH_TLS. * This is a STARTTLS command, so do the upcall to the rpctlssd daemon, * which will do the TLS handshake. */ enum auth_stat _svcauth_rpcsec_tls(struct svc_req *rqst, struct rpc_msg *msg) { bool_t call_stat; enum clnt_stat stat; SVCXPRT *xprt; uint32_t flags; uint64_t ssl[3]; int ngrps; uid_t uid; gid_t *gidp; #ifdef KERN_TLS u_int maxlen; #endif /* Initialize reply. 
*/ rqst->rq_verf = rpctls_null_verf; printf("authtls: clen=%d vlen=%d fl=%d\n", rqst->rq_cred.oa_length, msg->rm_call.cb_verf.oa_length, msg->rm_call.cb_verf.oa_flavor); /* Check client credentials. */ if (rqst->rq_cred.oa_length != 0 || msg->rm_call.cb_verf.oa_length != 0 || msg->rm_call.cb_verf.oa_flavor != AUTH_NULL) return (AUTH_BADCRED); printf("authtls proc=%d\n", rqst->rq_proc); if (rqst->rq_proc != NULLPROC) return (AUTH_REJECTEDCRED); call_stat = FALSE; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, true)) call_stat = TRUE; #endif if (!call_stat) return (AUTH_REJECTEDCRED); /* * Disable reception for the krpc so that the TLS handshake can * be done on the socket in the rpctlssd daemon. */ xprt = rqst->rq_xprt; sx_xlock(&xprt->xp_lock); xprt->xp_dontrcv = TRUE; sx_xunlock(&xprt->xp_lock); /* * Send the reply to the NULL RPC with AUTH_TLS, which is the * STARTTLS command for Sun RPC. */ call_stat = svc_sendreply(rqst, (xdrproc_t)xdr_void, NULL); printf("authtls: null reply=%d\n", call_stat); if (!call_stat) { sx_xlock(&xprt->xp_lock); xprt->xp_dontrcv = FALSE; sx_xunlock(&xprt->xp_lock); xprt_active(xprt); /* Harmless if already active. */ return (AUTH_REJECTEDCRED); } /* Do an upcall to do the TLS handshake. */ stat = rpctls_server(xprt, xprt->xp_socket, &flags, ssl, &uid, &ngrps, &gidp); /* Re-enable reception on the socket within the krpc. */ sx_xlock(&xprt->xp_lock); xprt->xp_dontrcv = FALSE; if (stat == RPC_SUCCESS) { xprt->xp_tls = flags; xprt->xp_sslsec = ssl[0]; xprt->xp_sslusec = ssl[1]; xprt->xp_sslrefno = ssl[2]; if ((flags & (RPCTLS_FLAGS_CERTUSER | RPCTLS_FLAGS_DISABLED)) == RPCTLS_FLAGS_CERTUSER) { xprt->xp_ngrps = ngrps; xprt->xp_uid = uid; xprt->xp_gidp = gidp; printf("got uid=%d ngrps=%d gidp=%p\n", uid, ngrps, gidp); } } sx_xunlock(&xprt->xp_lock); xprt_active(xprt); /* Harmless if already active. */ printf("authtls: aft handshake stat=%d\n", stat); return (RPCSEC_GSS_NODISPATCH); } /* * Get kern.ipc.tls.enable and kern.ipc.tls.maxlen. 
*/ bool rpctls_getinfo(u_int *maxlenp, bool rpctlscd_run, bool rpctlssd_run) { u_int maxlen; bool enable; int error; size_t siz; if (PMAP_HAS_DMAP == 0 || !mb_use_ext_pgs) return (false); siz = sizeof(enable); error = kernel_sysctlbyname(curthread, "kern.ipc.tls.enable", &enable, &siz, NULL, 0, NULL, 0); if (error != 0) return (false); siz = sizeof(maxlen); error = kernel_sysctlbyname(curthread, "kern.ipc.tls.maxlen", &maxlen, &siz, NULL, 0, NULL, 0); if (error != 0) return (false); if (rpctlscd_run && rpctls_connect_handle == NULL) return (false); if (rpctlssd_run && rpctls_server_handle == NULL) return (false); *maxlenp = maxlen; return (enable); } Index: projects/nfs-over-tls/sys/rpc/svc_vc.c =================================================================== --- projects/nfs-over-tls/sys/rpc/svc_vc.c (revision 363117) +++ projects/nfs-over-tls/sys/rpc/svc_vc.c (revision 363118) @@ -1,1124 +1,1133 @@ /* $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)svc_tcp.c 2.2 88/08/01 4.0 RPCSRC"; #endif #include __FBSDID("$FreeBSD$"); /* * svc_vc.c, Server side for Connection Oriented based RPC. * * Actually implements two flavors of transporter - * a tcp rendezvouser (a listner and connection establisher) * and a record/tcp stream. 
*/ #include "opt_kern_tls.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *); static void svc_vc_rendezvous_destroy(SVCXPRT *); static bool_t svc_vc_null(void); static void svc_vc_destroy(SVCXPRT *); static enum xprt_stat svc_vc_stat(SVCXPRT *); static bool_t svc_vc_ack(SVCXPRT *, uint32_t *); static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *seq); static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in); static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq, void *in); static void svc_vc_backchannel_destroy(SVCXPRT *); static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *); static bool_t svc_vc_backchannel_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_vc_backchannel_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *); static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in); static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr); static int svc_vc_accept(struct socket *head, struct socket **sop); static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag); static int svc_vc_rendezvous_soupcall(struct socket *, void *, int); static struct xp_ops svc_vc_rendezvous_ops = { .xp_recv = svc_vc_rendezvous_recv, .xp_stat = svc_vc_rendezvous_stat, .xp_reply = (bool_t (*)(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *))svc_vc_null, .xp_destroy = svc_vc_rendezvous_destroy, .xp_control = svc_vc_rendezvous_control }; static 
struct xp_ops svc_vc_ops = { .xp_recv = svc_vc_recv, .xp_stat = svc_vc_stat, .xp_ack = svc_vc_ack, .xp_reply = svc_vc_reply, .xp_destroy = svc_vc_destroy, .xp_control = svc_vc_control }; static struct xp_ops svc_vc_backchannel_ops = { .xp_recv = svc_vc_backchannel_recv, .xp_stat = svc_vc_backchannel_stat, .xp_reply = svc_vc_backchannel_reply, .xp_destroy = svc_vc_backchannel_destroy, .xp_control = svc_vc_backchannel_control }; /* * Usage: * xprt = svc_vc_create(sock, send_buf_size, recv_buf_size); * * Creates, registers, and returns a (rpc) tcp based transporter. * Once *xprt is initialized, it is registered as a transporter * see (svc.h, xprt_register). This routine returns * a NULL if a problem occurred. * * The filedescriptor passed in is expected to refer to a bound, but * not yet connected socket. * * Since streams do buffered io similar to stdio, the caller can specify * how big the send and receive buffers are via the second and third parms; * 0 => use the system default. */ SVCXPRT * svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize, size_t recvsize) { SVCXPRT *xprt; struct sockaddr* sa; int error; SOCK_LOCK(so); if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) { SOCK_UNLOCK(so); CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa); CURVNET_RESTORE(); if (error) return (NULL); xprt = svc_vc_create_conn(pool, so, sa); free(sa, M_SONAME); return (xprt); } SOCK_UNLOCK(so); xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = NULL; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_rendezvous_ops; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); CURVNET_RESTORE(); if (error) { goto cleanup_svc_vc_create; } memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); free(sa, M_SONAME); xprt_register(xprt); solisten(so, -1, curthread); SOLISTEN_LOCK(so); xprt->xp_upcallset = 1; solisten_upcall_set(so, svc_vc_rendezvous_soupcall, xprt); 
SOLISTEN_UNLOCK(so); return (xprt); cleanup_svc_vc_create: sx_destroy(&xprt->xp_lock); svc_xprt_free(xprt); return (NULL); } /* * Create a new transport for a socket optained via soaccept(). */ SVCXPRT * svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr) { SVCXPRT *xprt; struct cf_conn *cd; struct sockaddr* sa = NULL; struct sockopt opt; int one = 1; int error; bzero(&opt, sizeof(struct sockopt)); opt.sopt_dir = SOPT_SET; opt.sopt_level = SOL_SOCKET; opt.sopt_name = SO_KEEPALIVE; opt.sopt_val = &one; opt.sopt_valsize = sizeof(one); error = sosetopt(so, &opt); if (error) { return (NULL); } if (so->so_proto->pr_protocol == IPPROTO_TCP) { bzero(&opt, sizeof(struct sockopt)); opt.sopt_dir = SOPT_SET; opt.sopt_level = IPPROTO_TCP; opt.sopt_name = TCP_NODELAY; opt.sopt_val = &one; opt.sopt_valsize = sizeof(one); error = sosetopt(so, &opt); if (error) { return (NULL); } } cd = mem_alloc(sizeof(*cd)); cd->strm_stat = XPRT_IDLE; xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = cd; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_ops; /* * See http://www.connectathon.org/talks96/nfstcp.pdf - client * has a 5 minute timer, server has a 6 minute timer. */ xprt->xp_idletimeout = 6 * 60; memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len); CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); CURVNET_RESTORE(); if (error) goto cleanup_svc_vc_create; memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); free(sa, M_SONAME); xprt_register(xprt); SOCKBUF_LOCK(&so->so_rcv); xprt->xp_upcallset = 1; soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt); SOCKBUF_UNLOCK(&so->so_rcv); /* * Throw the transport into the active list in case it already * has some data buffered. 
*/ sx_xlock(&xprt->xp_lock); xprt_active(xprt); sx_xunlock(&xprt->xp_lock); return (xprt); cleanup_svc_vc_create: sx_destroy(&xprt->xp_lock); svc_xprt_free(xprt); mem_free(cd, sizeof(*cd)); return (NULL); } /* * Create a new transport for a backchannel on a clnt_vc socket. */ SVCXPRT * svc_vc_create_backchannel(SVCPOOL *pool) { SVCXPRT *xprt = NULL; struct cf_conn *cd = NULL; cd = mem_alloc(sizeof(*cd)); cd->strm_stat = XPRT_IDLE; xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = NULL; xprt->xp_p1 = cd; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_backchannel_ops; return (xprt); } /* * This does all of the accept except the final call to soaccept. The * caller will call soaccept after dropping its locks (soaccept may * call malloc). */ int svc_vc_accept(struct socket *head, struct socket **sop) { struct socket *so; int error = 0; short nbio; /* XXXGL: shouldn't that be an assertion? */ if ((head->so_options & SO_ACCEPTCONN) == 0) { error = EINVAL; goto done; } #ifdef MAC error = mac_socket_check_accept(curthread->td_ucred, head); if (error != 0) goto done; #endif /* * XXXGL: we want non-blocking semantics. The socket could be a * socket created by kernel as well as socket shared with userland, * so we can't be sure about presense of SS_NBIO. We also shall not * toggle it on the socket, since that may surprise userland. So we * set SS_NBIO only temporarily. 
*/ SOLISTEN_LOCK(head); nbio = head->so_state & SS_NBIO; head->so_state |= SS_NBIO; error = solisten_dequeue(head, &so, 0); head->so_state &= (nbio & ~SS_NBIO); if (error) goto done; so->so_state |= nbio; *sop = so; /* connection has been removed from the listen queue */ KNOTE_UNLOCKED(&head->so_rdsel.si_note, 0); done: return (error); } /*ARGSUSED*/ static bool_t svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct socket *so = NULL; struct sockaddr *sa = NULL; int error; SVCXPRT *new_xprt; /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to accept a * connection from the socket and turn it into a new * transport. If the accept fails, we have drained all pending * connections so we call xprt_inactive(). */ sx_xlock(&xprt->xp_lock); error = svc_vc_accept(xprt->xp_socket, &so); if (error == EWOULDBLOCK) { /* * We must re-test for new connections after taking * the lock to protect us in the case where a new * connection arrives after our call to accept fails * with EWOULDBLOCK. */ SOLISTEN_LOCK(xprt->xp_socket); if (TAILQ_EMPTY(&xprt->xp_socket->sol_comp)) xprt_inactive_self(xprt); SOLISTEN_UNLOCK(xprt->xp_socket); sx_xunlock(&xprt->xp_lock); return (FALSE); } if (error) { SOLISTEN_LOCK(xprt->xp_socket); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(xprt->xp_socket, SO_RCV); } SOLISTEN_UNLOCK(xprt->xp_socket); xprt_inactive_self(xprt); sx_xunlock(&xprt->xp_lock); return (FALSE); } sx_xunlock(&xprt->xp_lock); sa = NULL; error = soaccept(so, &sa); if (error) { /* * XXX not sure if I need to call sofree or soclose here. */ if (sa) free(sa, M_SONAME); return (FALSE); } /* * svc_vc_create_conn will call xprt_register - we don't need * to do anything with the new connection except derefence it. 
*/ new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa); if (!new_xprt) { soclose(so); } else { SVC_RELEASE(new_xprt); } free(sa, M_SONAME); return (FALSE); /* there is never an rpc msg to be processed */ } /*ARGSUSED*/ static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *xprt) { return (XPRT_IDLE); } static void svc_vc_destroy_common(SVCXPRT *xprt) { enum clnt_stat stat; uint32_t reterr; if (xprt->xp_socket) { if ((xprt->xp_tls & (RPCTLS_FLAGS_HANDSHAKE | RPCTLS_FLAGS_HANDSHFAIL)) == 0) (void)soclose(xprt->xp_socket); else if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) { /* * If the upcall fails, the socket has * probably been closed via the rpctlssd * daemon having crashed or been * restarted, so just ignore returned stat. */ stat = rpctls_srv_disconnect(xprt->xp_sslsec, xprt->xp_sslusec, xprt->xp_sslrefno, &reterr); } + if ((xprt->xp_tls & (RPCTLS_FLAGS_HANDSHAKE | + RPCTLS_FLAGS_HANDSHFAIL)) != 0) { + /* Must sorele() to get rid of reference. */ + CURVNET_SET(xprt->xp_socket->so_vnet); + SOCK_LOCK(xprt->xp_socket); + sorele(xprt->xp_socket); + CURVNET_RESTORE(); + } } if (xprt->xp_netid) (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1); svc_xprt_free(xprt); } static void svc_vc_rendezvous_destroy(SVCXPRT *xprt) { SOLISTEN_LOCK(xprt->xp_socket); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; solisten_upcall_set(xprt->xp_socket, NULL, NULL); } SOLISTEN_UNLOCK(xprt->xp_socket); svc_vc_destroy_common(xprt); } static void svc_vc_destroy(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1; SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; - soupcall_clear(xprt->xp_socket, SO_RCV); + if (xprt->xp_socket->so_rcv.sb_upcall != NULL) + soupcall_clear(xprt->xp_socket, SO_RCV); } SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); svc_vc_destroy_common(xprt); if (cd->mreq) m_freem(cd->mreq); if (cd->mpending) m_freem(cd->mpending); mem_free(cd, sizeof(*cd)); } static void svc_vc_backchannel_destroy(SVCXPRT 
*xprt) { struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1; struct mbuf *m, *m2; svc_xprt_free(xprt); m = cd->mreq; while (m != NULL) { m2 = m; m = m->m_nextpkt; m_freem(m2); } mem_free(cd, sizeof(*cd)); } /*ARGSUSED*/ static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static bool_t svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static enum xprt_stat svc_vc_stat(SVCXPRT *xprt) { struct cf_conn *cd; cd = (struct cf_conn *)(xprt->xp_p1); if (cd->strm_stat == XPRT_DIED) return (XPRT_DIED); if (cd->mreq != NULL && cd->resid == 0 && cd->eor) return (XPRT_MOREREQS); if (soreadable(xprt->xp_socket)) return (XPRT_MOREREQS); return (XPRT_IDLE); } static bool_t svc_vc_ack(SVCXPRT *xprt, uint32_t *ack) { *ack = atomic_load_acq_32(&xprt->xp_snt_cnt); *ack -= sbused(&xprt->xp_socket->so_snd); return (TRUE); } static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *xprt) { struct cf_conn *cd; cd = (struct cf_conn *)(xprt->xp_p1); if (cd->mreq != NULL) return (XPRT_MOREREQS); return (XPRT_IDLE); } /* * If we have an mbuf chain in cd->mpending, try to parse a record from it, * leaving the result in cd->mreq. If we don't have a complete record, leave * the partial result in cd->mreq and try to read more from the socket. */ static int svc_vc_process_pending(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct socket *so = xprt->xp_socket; struct mbuf *m; /* * If cd->resid is non-zero, we have part of the * record already, otherwise we are expecting a record * marker. */ if (!cd->resid && cd->mpending) { /* * See if there is enough data buffered to * make up a record marker. Make sure we can * handle the case where the record marker is * split across more than one mbuf. 
*/
		size_t n = 0;
		uint32_t header;

		m = cd->mpending;
		while (n < sizeof(uint32_t) && m) {
			n += m->m_len;
			m = m->m_next;
		}
		if (n < sizeof(uint32_t)) {
			/*
			 * Not enough bytes for a record marker yet:
			 * lower sb_lowat so the upcall fires once the
			 * remainder arrives.
			 */
			so->so_rcv.sb_lowat = sizeof(uint32_t) - n;
			return (FALSE);
		}
		m_copydata(cd->mpending, 0, sizeof(header),
		    (char *)&header);
		header = ntohl(header);
		/* High bit = last fragment; low 31 bits = fragment length. */
		cd->eor = (header & 0x80000000) != 0;
		cd->resid = header & 0x7fffffff;
		m_adj(cd->mpending, sizeof(uint32_t));
	}

	/*
	 * Start pulling off mbufs from cd->mpending
	 * until we either have a complete record or
	 * we run out of data. We use m_split to pull
	 * data - it will pull as much as possible and
	 * split the last mbuf if necessary.
	 */
	while (cd->mpending && cd->resid) {
		m = cd->mpending;
		if (cd->mpending->m_next
		    || cd->mpending->m_len > cd->resid)
			cd->mpending = m_split(cd->mpending, cd->resid,
			    M_WAITOK);
		else
			cd->mpending = NULL;
		if (cd->mreq)
			m_last(cd->mreq)->m_next = m;
		else
			cd->mreq = m;
		while (m) {
			cd->resid -= m->m_len;
			m = m->m_next;
		}
	}

	/*
	 * Block receive upcalls if we have more data pending,
	 * otherwise report our need.
	 */
	if (cd->mpending)
		so->so_rcv.sb_lowat = INT_MAX;
	else
		so->so_rcv.sb_lowat =
		    imax(1, imin(cd->resid, so->so_rcv.sb_hiwat / 2));
	return (TRUE);
}

/*
 * Receive one RPC request from a connection transport.  Parses
 * record-marked data out of cd->mpending, reads more from the socket as
 * needed, diverts backchannel REPLY messages to the client code, and
 * handles RPC-over-TLS non-application records via the rpctlssd daemon.
 * Returns TRUE with *mp set to the request body on success.
 */
static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct uio uio;
	struct mbuf *m, *ctrl;
	struct socket* so = xprt->xp_socket;
	XDR xdrs;
	int error, rcvflag;
	uint32_t reterr, xid_plus_direction[2];
	struct cmsghdr *cmsg;
	struct tls_get_record tgr;
	enum clnt_stat ret;

	/*
	 * Serialise access to the socket and our own record parsing
	 * state.
	 */
	sx_xlock(&xprt->xp_lock);

	for (;;) {
		/* If we have no request ready, check pending queue. */
		while (cd->mpending &&
		    (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) {
			if (!svc_vc_process_pending(xprt))
				break;
		}

		/* Process and return complete request in cd->mreq. */
		if (cd->mreq != NULL && cd->resid == 0 && cd->eor) {
			/*
			 * Now, check for a backchannel reply.
			 * The XID is in the first uint32_t of the reply
			 * and the message direction is the second one.
			 */
			if ((cd->mreq->m_len >= sizeof(xid_plus_direction) ||
			    m_length(cd->mreq, NULL) >=
			    sizeof(xid_plus_direction)) &&
			    xprt->xp_p2 != NULL) {
				m_copydata(cd->mreq, 0,
				    sizeof(xid_plus_direction),
				    (char *)xid_plus_direction);
				xid_plus_direction[0] =
				    ntohl(xid_plus_direction[0]);
				xid_plus_direction[1] =
				    ntohl(xid_plus_direction[1]);
				/* Check message direction. */
				if (xid_plus_direction[1] == REPLY) {
					/* Hand the reply to the client side. */
					clnt_bck_svccall(xprt->xp_p2,
					    cd->mreq,
					    xid_plus_direction[0]);
					cd->mreq = NULL;
					continue;
				}
			}

			xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
			cd->mreq = NULL;

			/* Check for next request in a pending queue. */
			svc_vc_process_pending(xprt);
			if (cd->mreq == NULL || cd->resid != 0) {
				SOCKBUF_LOCK(&so->so_rcv);
				if (!soreadable(so))
					xprt_inactive_self(xprt);
				SOCKBUF_UNLOCK(&so->so_rcv);
			}

			sx_xunlock(&xprt->xp_lock);

			if (! xdr_callmsg(&xdrs, msg)) {
				XDR_DESTROY(&xdrs);
				return (FALSE);
			}

			*addrp = NULL;
			*mp = xdrmbuf_getall(&xdrs);
			XDR_DESTROY(&xdrs);

			return (TRUE);
		}

		/*
		 * If receiving is disabled so that a TLS handshake can be
		 * done by the rpctlssd daemon, return FALSE here.
		 */
		rcvflag = MSG_DONTWAIT;
		if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0)
			rcvflag |= MSG_TLSAPPDATA;
tryagain:
		if (xprt->xp_dontrcv) {
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		/*
		 * The socket upcall calls xprt_active() which will eventually
		 * cause the server to call us here. We attempt to
		 * read as much as possible from the socket and put
		 * the result in cd->mpending. If the read fails,
		 * we have drained both cd->mpending and the socket so
		 * we can call xprt_inactive().
		 */
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		ctrl = m = NULL;
		error = soreceive(so, NULL, &uio, &m, &ctrl, &rcvflag);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK.
*/
			SOCKBUF_LOCK(&so->so_rcv);
			if (!soreadable(so))
				xprt_inactive_self(xprt);
			SOCKBUF_UNLOCK(&so->so_rcv);
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		/*
		 * A return of ENXIO indicates that there is a
		 * non-application data record at the head of the
		 * socket's receive queue, for TLS connections.
		 * This record needs to be handled in userland
		 * via an SSL_read() call, so do an upcall to the daemon.
		 */
		if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0 &&
		    error == ENXIO) {
			/* Disable reception. */
			xprt->xp_dontrcv = TRUE;
			sx_xunlock(&xprt->xp_lock);
			/* NOTE(review): leftover debugging printf? */
			printf("Call rpctls_srv_handlerecord\n");
			ret = rpctls_srv_handlerecord(xprt->xp_sslsec,
			    xprt->xp_sslusec, xprt->xp_sslrefno, &reterr);
			sx_xlock(&xprt->xp_lock);
			xprt->xp_dontrcv = FALSE;
			if (ret != RPC_SUCCESS || reterr != RPCTLSERR_OK) {
				/*
				 * All we can do is soreceive() it and
				 * then toss it.
				 */
				rcvflag = MSG_DONTWAIT;
				goto tryagain;
			}
			sx_xunlock(&xprt->xp_lock);
			xprt_active(xprt);	/* Harmless if already active. */
			return (FALSE);
		}

		if (error) {
			/* Fatal socket error: tear down the upcall. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (xprt->xp_upcallset) {
				xprt->xp_upcallset = 0;
				soupcall_clear(so, SO_RCV);
			}
			SOCKBUF_UNLOCK(&so->so_rcv);
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (!m) {
			/*
			 * EOF - the other end has closed the socket.
			 */
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		/* Process any record header(s). */
		if (ctrl != NULL) {
			if (ctrl->m_next != NULL)
				printf("EEK! svc list of controls\n");
			cmsg = mtod(ctrl, struct cmsghdr *);
			if (cmsg->cmsg_type == TLS_GET_RECORD &&
			    cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) {
				memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
				/*
				 * This should have been handled by
				 * the rpctls_svc_handlerecord()
				 * upcall. If not, all we can do is
				 * toss it away.
				 */
				if (tgr.tls_type != TLS_RLTYPE_APP) {
					printf("Got weird type=%d\n",
					    tgr.tls_type);
					m_freem(m);
					m_free(ctrl);
					rcvflag = MSG_DONTWAIT |
					    MSG_TLSAPPDATA;
					goto tryagain;
				}
			}
			m_free(ctrl);
		}

		/* Append the newly read data and loop to re-parse. */
		if (cd->mpending)
			m_last(cd->mpending)->m_next = m;
		else
			cd->mpending = m;
	}
}

/*
 * Receive one queued backchannel request.  Requests are queued on
 * cd->mreq (linked via m_nextpkt) by the client-side code; pop the head
 * under ct_lock and decode it as an RPC call message.
 */
static bool_t
svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct ct_data *ct;
	struct mbuf *m;
	XDR xdrs;

	sx_xlock(&xprt->xp_lock);
	ct = (struct ct_data *)xprt->xp_p2;
	if (ct == NULL) {
		/* Client side has gone away. */
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}
	mtx_lock(&ct->ct_lock);
	m = cd->mreq;
	if (m == NULL) {
		/* Queue empty: nothing to process. */
		xprt_inactive_self(xprt);
		mtx_unlock(&ct->ct_lock);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}
	cd->mreq = m->m_nextpkt;
	mtx_unlock(&ct->ct_lock);
	sx_xunlock(&xprt->xp_lock);

	/* NOTE(review): leftover debugging printf? */
	printf("recv backch m=%p\n", m);
	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	if (! xdr_callmsg(&xdrs, msg)) {
		printf("recv backch callmsg failed\n");
		XDR_DESTROY(&xdrs);
		return (FALSE);
	}
	*addrp = NULL;
	*mp = xdrmbuf_getall(&xdrs);
	XDR_DESTROY(&xdrs);
	return (TRUE);
}

/*
 * Send an RPC reply on a connection transport: XDR-encode the reply,
 * prepend a record marker, convert to ext_pgs mbufs for KERN_TLS when
 * the connection uses RPC-over-TLS, and sosend() it.  xp_snd_cnt /
 * xp_snt_cnt track bytes queued vs. successfully handed to the socket.
 */
static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error, len, maxextsiz;
#ifdef KERN_TLS
	u_int maxlen;
#endif

	/*
	 * Leave space for record mark.
	 */
	mrep = m_gethdr(M_WAITOK, MT_DATA);
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		/* Encode the header then append the pre-built result body. */
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		len = mrep->m_pkthdr.len;
		*mtod(mrep, uint32_t *) =
		    htonl(0x80000000 | (len - sizeof(uint32_t)));

		/* For RPC-over-TLS, copy mrep to a chain of ext_pgs.
*/
		if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) {
			/*
			 * Copy the mbuf chain to a chain of
			 * ext_pgs mbuf(s) as required by KERN_TLS.
			 */
			maxextsiz = TLS_MAX_MSG_SIZE_V10_2;
#ifdef KERN_TLS
			if (rpctls_getinfo(&maxlen, false, false))
				maxextsiz = min(maxextsiz, maxlen);
#endif
			mrep = _rpc_copym_into_ext_pgs(mrep, maxextsiz);
		}
		atomic_add_32(&xprt->xp_snd_cnt, len);
		/*
		 * sosend consumes mreq.
		 */
		error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
		    0, curthread);
		if (!error) {
			/* Only count the bytes as sent on success. */
			atomic_add_rel_32(&xprt->xp_snt_cnt, len);
			if (seq)
				*seq = xprt->xp_snd_cnt;
			stat = TRUE;
		} else
			atomic_subtract_32(&xprt->xp_snd_cnt, len);
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}

/*
 * Send a backchannel reply on the client's socket (ct->ct_socket).
 * Same encode/record-mark/TLS path as svc_vc_reply(), but no send-count
 * tracking, and EPIPE if the client connection is gone.
 */
static bool_t
svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	struct ct_data *ct;
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error, maxextsiz;
#ifdef KERN_TLS
	u_int maxlen;
#endif

	/*
	 * Leave space for record mark.
	 */
	mrep = m_gethdr(M_WAITOK, MT_DATA);
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		*mtod(mrep, uint32_t *) =
		    htonl(0x80000000 | (mrep->m_pkthdr.len
			- sizeof(uint32_t)));

		/* For RPC-over-TLS, copy mrep to a chain of ext_pgs. */
		if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) {
			/*
			 * Copy the mbuf chain to a chain of
			 * ext_pgs mbuf(s) as required by KERN_TLS.
			 */
			maxextsiz = TLS_MAX_MSG_SIZE_V10_2;
#ifdef KERN_TLS
			if (rpctls_getinfo(&maxlen, false, false))
				maxextsiz = min(maxextsiz, maxlen);
#endif
			mrep = _rpc_copym_into_ext_pgs(mrep, maxextsiz);
		}
		sx_xlock(&xprt->xp_lock);
		ct = (struct ct_data *)xprt->xp_p2;
		if (ct != NULL)
			error = sosend(ct->ct_socket, NULL, NULL, mrep, NULL,
			    0, curthread);
		else
			error = EPIPE;
		sx_xunlock(&xprt->xp_lock);
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}

/* Placeholder transport method: always fails. */
static bool_t
svc_vc_null()
{

	return (FALSE);
}

/* Receive upcall for connection sockets: mark the transport active. */
static int
svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	if (soreadable(xprt->xp_socket))
		xprt_active(xprt);
	return (SU_OK);
}

/* Listen upcall: activate the rendezvous transport when a connection
 * is waiting on the completed-connection queue. */
static int
svc_vc_rendezvous_soupcall(struct socket *head, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	if (!TAILQ_EMPTY(&head->sol_comp))
		xprt_active(xprt);
	return (SU_OK);
}

#if 0
/*
 * Get the effective UID of the sending process. Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid)
{
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr;
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		return (-1);
}
#endif