Index: head/sys/rpc/rpc_generic.c =================================================================== --- head/sys/rpc/rpc_generic.c (revision 286893) +++ head/sys/rpc/rpc_generic.c (revision 286894) @@ -1,882 +1,886 @@ /* $NetBSD: rpc_generic.c,v 1.4 2000/09/28 09:07:04 kleink Exp $ */ /*- * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1986-1991 by Sun Microsystems Inc. */ /* #pragma ident "@(#)rpc_generic.c 1.17 94/04/24 SMI" */ #include __FBSDID("$FreeBSD$"); /* * rpc_generic.c, Miscl routines for RPC. * */ #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern u_long sb_max_adj; /* not defined in socketvar.h */ #if __FreeBSD_version < 700000 #define strrchr rindex #endif /* Provide an entry point hook for the rpcsec_gss module. 
*/ struct rpc_gss_entries rpc_gss_entries; struct handle { NCONF_HANDLE *nhandle; int nflag; /* Whether NETPATH or NETCONFIG */ int nettype; }; static const struct _rpcnettype { const char *name; const int type; } _rpctypelist[] = { { "netpath", _RPC_NETPATH }, { "visible", _RPC_VISIBLE }, { "circuit_v", _RPC_CIRCUIT_V }, { "datagram_v", _RPC_DATAGRAM_V }, { "circuit_n", _RPC_CIRCUIT_N }, { "datagram_n", _RPC_DATAGRAM_N }, { "tcp", _RPC_TCP }, { "udp", _RPC_UDP }, { 0, _RPC_NONE } }; struct netid_af { const char *netid; int af; int protocol; }; static const struct netid_af na_cvt[] = { { "udp", AF_INET, IPPROTO_UDP }, { "tcp", AF_INET, IPPROTO_TCP }, #ifdef INET6 { "udp6", AF_INET6, IPPROTO_UDP }, { "tcp6", AF_INET6, IPPROTO_TCP }, #endif { "local", AF_LOCAL, 0 } }; struct rpc_createerr rpc_createerr; /* * Find the appropriate buffer size */ u_int /*ARGSUSED*/ __rpc_get_t_size(int af, int proto, int size) { int defsize; switch (proto) { case IPPROTO_TCP: defsize = 64 * 1024; /* XXX */ break; case IPPROTO_UDP: defsize = UDPMSGSIZE; break; default: defsize = RPC_MAXDATASIZE; break; } if (size == 0) return defsize; /* Check whether the value is within the upper max limit */ return (size > sb_max_adj ? (u_int)sb_max_adj : (u_int)size); } /* * Find the appropriate address buffer size */ u_int __rpc_get_a_size(af) int af; { switch (af) { case AF_INET: return sizeof (struct sockaddr_in); #ifdef INET6 case AF_INET6: return sizeof (struct sockaddr_in6); #endif case AF_LOCAL: return sizeof (struct sockaddr_un); default: break; } return ((u_int)RPC_MAXADDRSIZE); } #if 0 /* * Used to ping the NULL procedure for clnt handle. * Returns NULL if fails, else a non-NULL pointer. */ void * rpc_nullproc(clnt) CLIENT *clnt; { struct timeval TIMEOUT = {25, 0}; if (clnt_call(clnt, NULLPROC, (xdrproc_t) xdr_void, NULL, (xdrproc_t) xdr_void, NULL, TIMEOUT) != RPC_SUCCESS) { return (NULL); } return ((void *) clnt); } #endif int __rpc_socket2sockinfo(struct socket *so, struct __rpc_sockinfo *sip) { int type, proto; struct sockaddr *sa; sa_family_t family; struct sockopt opt; int error; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); CURVNET_RESTORE(); if (error) return 0; sip->si_alen = sa->sa_len; family = sa->sa_family; free(sa, M_SONAME); opt.sopt_dir = SOPT_GET; opt.sopt_level = SOL_SOCKET; opt.sopt_name = SO_TYPE; opt.sopt_val = &type; opt.sopt_valsize = sizeof type; opt.sopt_td = NULL; error = sogetopt(so, &opt); if (error) return 0; /* XXX */ if (family != AF_LOCAL) { if (type == SOCK_STREAM) proto = IPPROTO_TCP; else if (type == SOCK_DGRAM) proto = IPPROTO_UDP; else return 0; } else proto = 0; sip->si_af = family; sip->si_proto = proto; sip->si_socktype = type; return 1; } /* * Linear search, but the number of entries is small. 
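/*
 * Editor's aside, not part of this change: a minimal userland sketch of the
 * netid -> (address family, protocol) linear lookup that
 * __rpc_nconf2sockinfo() performs over na_cvt[] below.  The netid_map type,
 * the table contents and netid_lookup() are illustrative names only.
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

struct netid_map {
	const char *netid;
	int af;
	int protocol;
};

static const struct netid_map map[] = {
	{ "udp",   AF_INET,   IPPROTO_UDP },
	{ "tcp",   AF_INET,   IPPROTO_TCP },
	{ "udp6",  AF_INET6,  IPPROTO_UDP },
	{ "tcp6",  AF_INET6,  IPPROTO_TCP },
	{ "local", AF_LOCAL,  0 },
};

/* Linear search; the table is small, so this is cheap. */
static const struct netid_map *
netid_lookup(const char *netid)
{
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (strcmp(map[i].netid, netid) == 0)
			return (&map[i]);
	return (NULL);
}

int
main(void)
{
	const struct netid_map *nm = netid_lookup("tcp6");

	if (nm != NULL)
		printf("af=%d proto=%d\n", nm->af, nm->protocol);
	return (0);
}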
*/ int __rpc_nconf2sockinfo(const struct netconfig *nconf, struct __rpc_sockinfo *sip) { int i; for (i = 0; i < (sizeof na_cvt) / (sizeof (struct netid_af)); i++) if (strcmp(na_cvt[i].netid, nconf->nc_netid) == 0 || ( strcmp(nconf->nc_netid, "unix") == 0 && strcmp(na_cvt[i].netid, "local") == 0)) { sip->si_af = na_cvt[i].af; sip->si_proto = na_cvt[i].protocol; sip->si_socktype = __rpc_seman2socktype((int)nconf->nc_semantics); if (sip->si_socktype == -1) return 0; sip->si_alen = __rpc_get_a_size(sip->si_af); return 1; } return 0; } struct socket * __rpc_nconf2socket(const struct netconfig *nconf) { struct __rpc_sockinfo si; struct socket *so; int error; if (!__rpc_nconf2sockinfo(nconf, &si)) return 0; so = NULL; error = socreate(si.si_af, &so, si.si_socktype, si.si_proto, curthread->td_ucred, curthread); if (error) return NULL; else return so; } char * taddr2uaddr(const struct netconfig *nconf, const struct netbuf *nbuf) { struct __rpc_sockinfo si; if (!__rpc_nconf2sockinfo(nconf, &si)) return NULL; return __rpc_taddr2uaddr_af(si.si_af, nbuf); } struct netbuf * uaddr2taddr(const struct netconfig *nconf, const char *uaddr) { struct __rpc_sockinfo si; if (!__rpc_nconf2sockinfo(nconf, &si)) return NULL; return __rpc_uaddr2taddr_af(si.si_af, uaddr); } char * __rpc_taddr2uaddr_af(int af, const struct netbuf *nbuf) { char *ret; struct sbuf sb; struct sockaddr_in *sin; struct sockaddr_un *sun; char namebuf[INET_ADDRSTRLEN]; #ifdef INET6 struct sockaddr_in6 *sin6; char namebuf6[INET6_ADDRSTRLEN]; #endif u_int16_t port; sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND); switch (af) { case AF_INET: sin = nbuf->buf; if (inet_ntop(af, &sin->sin_addr, namebuf, sizeof namebuf) == NULL) return NULL; port = ntohs(sin->sin_port); if (sbuf_printf(&sb, "%s.%u.%u", namebuf, ((uint32_t)port) >> 8, port & 0xff) < 0) return NULL; break; #ifdef INET6 case AF_INET6: sin6 = nbuf->buf; if (inet_ntop(af, &sin6->sin6_addr, namebuf6, sizeof namebuf6) == NULL) return NULL; port = ntohs(sin6->sin6_port); if (sbuf_printf(&sb, "%s.%u.%u", namebuf6, ((uint32_t)port) >> 8, port & 0xff) < 0) return NULL; break; #endif case AF_LOCAL: sun = nbuf->buf; if (sbuf_printf(&sb, "%.*s", (int)(sun->sun_len - offsetof(struct sockaddr_un, sun_path)), sun->sun_path) < 0) return (NULL); break; default: return NULL; } sbuf_finish(&sb); ret = strdup(sbuf_data(&sb), M_RPC); sbuf_delete(&sb); return ret; } struct netbuf * __rpc_uaddr2taddr_af(int af, const char *uaddr) { struct netbuf *ret = NULL; char *addrstr, *p; unsigned port, portlo, porthi; struct sockaddr_in *sin; #ifdef INET6 struct sockaddr_in6 *sin6; #endif struct sockaddr_un *sun; port = 0; sin = NULL; addrstr = strdup(uaddr, M_RPC); if (addrstr == NULL) return NULL; /* * AF_LOCAL addresses are expected to be absolute * pathnames, anything else will be AF_INET or AF_INET6. 
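/*
 * Editor's aside, not part of this change: a userland sketch of the RPC
 * "universal address" encoding that __rpc_taddr2uaddr_af() above produces,
 * i.e. "a.b.c.d.hi.lo" where hi/lo are the high and low bytes of the port.
 * print_uaddr() is an illustrative name only.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

static void
print_uaddr(const struct sockaddr_in *sin)
{
	char host[INET_ADDRSTRLEN];
	uint16_t port = ntohs(sin->sin_port);

	if (inet_ntop(AF_INET, &sin->sin_addr, host, sizeof(host)) == NULL)
		return;
	/* The port is split into its two bytes, most significant first. */
	printf("%s.%u.%u\n", host, port >> 8, port & 0xff);
}

int
main(void)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port = htons(2049),
		.sin_addr = { .s_addr = htonl(INADDR_LOOPBACK) },
	};

	print_uaddr(&sin);	/* prints "127.0.0.1.8.1" */
	return (0);
}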
*/ if (*addrstr != '/') { p = strrchr(addrstr, '.'); if (p == NULL) goto out; portlo = (unsigned)strtol(p + 1, NULL, 10); *p = '\0'; p = strrchr(addrstr, '.'); if (p == NULL) goto out; porthi = (unsigned)strtol(p + 1, NULL, 10); *p = '\0'; port = (porthi << 8) | portlo; } ret = (struct netbuf *)malloc(sizeof *ret, M_RPC, M_WAITOK); if (ret == NULL) goto out; switch (af) { case AF_INET: sin = (struct sockaddr_in *)malloc(sizeof *sin, M_RPC, M_WAITOK); if (sin == NULL) goto out; memset(sin, 0, sizeof *sin); sin->sin_family = AF_INET; sin->sin_port = htons(port); if (inet_pton(AF_INET, addrstr, &sin->sin_addr) <= 0) { free(sin, M_RPC); free(ret, M_RPC); ret = NULL; goto out; } sin->sin_len = ret->maxlen = ret->len = sizeof *sin; ret->buf = sin; break; #ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)malloc(sizeof *sin6, M_RPC, M_WAITOK); if (sin6 == NULL) goto out; memset(sin6, 0, sizeof *sin6); sin6->sin6_family = AF_INET6; sin6->sin6_port = htons(port); if (inet_pton(AF_INET6, addrstr, &sin6->sin6_addr) <= 0) { free(sin6, M_RPC); free(ret, M_RPC); ret = NULL; goto out; } sin6->sin6_len = ret->maxlen = ret->len = sizeof *sin6; ret->buf = sin6; break; #endif case AF_LOCAL: sun = (struct sockaddr_un *)malloc(sizeof *sun, M_RPC, M_WAITOK); if (sun == NULL) goto out; memset(sun, 0, sizeof *sun); sun->sun_family = AF_LOCAL; strncpy(sun->sun_path, addrstr, sizeof(sun->sun_path) - 1); ret->len = ret->maxlen = sun->sun_len = SUN_LEN(sun); ret->buf = sun; break; default: break; } out: free(addrstr, M_RPC); return ret; } int __rpc_seman2socktype(int semantics) { switch (semantics) { case NC_TPI_CLTS: return SOCK_DGRAM; case NC_TPI_COTS_ORD: return SOCK_STREAM; case NC_TPI_RAW: return SOCK_RAW; default: break; } return -1; } int __rpc_socktype2seman(int socktype) { switch (socktype) { case SOCK_DGRAM: return NC_TPI_CLTS; case SOCK_STREAM: return NC_TPI_COTS_ORD; case SOCK_RAW: return NC_TPI_RAW; default: break; } return -1; } /* * Returns the type of the network as defined in * If nettype is NULL, it defaults to NETPATH. */ static int getnettype(const char *nettype) { int i; if ((nettype == NULL) || (nettype[0] == 0)) { return (_RPC_NETPATH); /* Default */ } #if 0 nettype = strlocase(nettype); #endif for (i = 0; _rpctypelist[i].name; i++) if (strcasecmp(nettype, _rpctypelist[i].name) == 0) { return (_rpctypelist[i].type); } return (_rpctypelist[i].type); } /* * For the given nettype (tcp or udp only), return the first structure found. * This should be freed by calling freenetconfigent() */ struct netconfig * __rpc_getconfip(const char *nettype) { char *netid; static char *netid_tcp = (char *) NULL; static char *netid_udp = (char *) NULL; struct netconfig *dummy; if (!netid_udp && !netid_tcp) { struct netconfig *nconf; void *confighandle; if (!(confighandle = setnetconfig())) { log(LOG_ERR, "rpc: failed to open " NETCONFIG); return (NULL); } while ((nconf = getnetconfig(confighandle)) != NULL) { if (strcmp(nconf->nc_protofmly, NC_INET) == 0) { if (strcmp(nconf->nc_proto, NC_TCP) == 0) { netid_tcp = strdup(nconf->nc_netid, M_RPC); } else if (strcmp(nconf->nc_proto, NC_UDP) == 0) { netid_udp = strdup(nconf->nc_netid, M_RPC); } } } endnetconfig(confighandle); } if (strcmp(nettype, "udp") == 0) netid = netid_udp; else if (strcmp(nettype, "tcp") == 0) netid = netid_tcp; else { return (NULL); } if ((netid == NULL) || (netid[0] == 0)) { return (NULL); } dummy = getnetconfigent(netid); return (dummy); } /* * Returns the type of the nettype, which should then be used with * __rpc_getconf(). 
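/*
 * Editor's aside, not part of this change: a userland sketch of the reverse
 * conversion done by __rpc_uaddr2taddr_af() above -- strip the last two
 * dot-separated components off a universal address, rebuild the 16-bit port,
 * and parse the remainder with inet_pton().  parse_uaddr() is an
 * illustrative name only.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
parse_uaddr(const char *uaddr, struct sockaddr_in *sin)
{
	char buf[64], *p;
	unsigned porthi, portlo;

	if (strlcpy(buf, uaddr, sizeof(buf)) >= sizeof(buf))
		return (-1);
	if ((p = strrchr(buf, '.')) == NULL)
		return (-1);
	portlo = (unsigned)strtol(p + 1, NULL, 10);
	*p = '\0';
	if ((p = strrchr(buf, '.')) == NULL)
		return (-1);
	porthi = (unsigned)strtol(p + 1, NULL, 10);
	*p = '\0';

	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;
	sin->sin_port = htons((porthi << 8) | portlo);
	return (inet_pton(AF_INET, buf, &sin->sin_addr) == 1 ? 0 : -1);
}

int
main(void)
{
	struct sockaddr_in sin;

	if (parse_uaddr("127.0.0.1.8.1", &sin) == 0)
		printf("port %u\n", ntohs(sin.sin_port));	/* 2049 */
	return (0);
}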
* * For simplicity in the kernel, we don't support the NETPATH * environment variable. We behave as userland would then NETPATH is * unset, i.e. iterate over all visible entries in netconfig. */ void * __rpc_setconf(nettype) const char *nettype; { struct handle *handle; handle = (struct handle *) malloc(sizeof (struct handle), M_RPC, M_WAITOK); switch (handle->nettype = getnettype(nettype)) { case _RPC_NETPATH: case _RPC_CIRCUIT_N: case _RPC_DATAGRAM_N: if (!(handle->nhandle = setnetconfig())) goto failed; handle->nflag = TRUE; break; case _RPC_VISIBLE: case _RPC_CIRCUIT_V: case _RPC_DATAGRAM_V: case _RPC_TCP: case _RPC_UDP: if (!(handle->nhandle = setnetconfig())) { log(LOG_ERR, "rpc: failed to open " NETCONFIG); goto failed; } handle->nflag = FALSE; break; default: goto failed; } return (handle); failed: free(handle, M_RPC); return (NULL); } /* * Returns the next netconfig struct for the given "net" type. * __rpc_setconf() should have been called previously. */ struct netconfig * __rpc_getconf(void *vhandle) { struct handle *handle; struct netconfig *nconf; handle = (struct handle *)vhandle; if (handle == NULL) { return (NULL); } for (;;) { if (handle->nflag) { nconf = getnetconfig(handle->nhandle); if (nconf && !(nconf->nc_flag & NC_VISIBLE)) continue; } else { nconf = getnetconfig(handle->nhandle); } if (nconf == NULL) break; if ((nconf->nc_semantics != NC_TPI_CLTS) && (nconf->nc_semantics != NC_TPI_COTS) && (nconf->nc_semantics != NC_TPI_COTS_ORD)) continue; switch (handle->nettype) { case _RPC_VISIBLE: if (!(nconf->nc_flag & NC_VISIBLE)) continue; /* FALLTHROUGH */ case _RPC_NETPATH: /* Be happy */ break; case _RPC_CIRCUIT_V: if (!(nconf->nc_flag & NC_VISIBLE)) continue; /* FALLTHROUGH */ case _RPC_CIRCUIT_N: if ((nconf->nc_semantics != NC_TPI_COTS) && (nconf->nc_semantics != NC_TPI_COTS_ORD)) continue; break; case _RPC_DATAGRAM_V: if (!(nconf->nc_flag & NC_VISIBLE)) continue; /* FALLTHROUGH */ case _RPC_DATAGRAM_N: if (nconf->nc_semantics != NC_TPI_CLTS) continue; break; case _RPC_TCP: if (((nconf->nc_semantics != NC_TPI_COTS) && (nconf->nc_semantics != NC_TPI_COTS_ORD)) || (strcmp(nconf->nc_protofmly, NC_INET) #ifdef INET6 && strcmp(nconf->nc_protofmly, NC_INET6)) #else ) #endif || strcmp(nconf->nc_proto, NC_TCP)) continue; break; case _RPC_UDP: if ((nconf->nc_semantics != NC_TPI_CLTS) || (strcmp(nconf->nc_protofmly, NC_INET) #ifdef INET6 && strcmp(nconf->nc_protofmly, NC_INET6)) #else ) #endif || strcmp(nconf->nc_proto, NC_UDP)) continue; break; } break; } return (nconf); } void __rpc_endconf(vhandle) void * vhandle; { struct handle *handle; handle = (struct handle *) vhandle; if (handle == NULL) { return; } endnetconfig(handle->nhandle); free(handle, M_RPC); } int __rpc_sockisbound(struct socket *so) { struct sockaddr *sa; int error, bound; + CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); + CURVNET_RESTORE(); if (error) return (0); switch (sa->sa_family) { case AF_INET: bound = (((struct sockaddr_in *) sa)->sin_port != 0); break; #ifdef INET6 case AF_INET6: bound = (((struct sockaddr_in6 *) sa)->sin6_port != 0); break; #endif case AF_LOCAL: /* XXX check this */ bound = (((struct sockaddr_un *) sa)->sun_path[0] != '\0'); break; default: bound = FALSE; break; } free(sa, M_SONAME); return bound; } /* * Implement XDR-style API for RPC call. 
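/*
 * Editor's aside, not part of this change: the bracketing pattern that
 * r286894 adds around pru_sockaddr()/pru_peeraddr() calls such as the one
 * in __rpc_sockisbound() above, shown in isolation.  This is kernel-only
 * (VIMAGE) code, so it is a sketch rather than a standalone program, and
 * krpc_sockaddr_in_vnet() is an invented helper name.
 */
static int
krpc_sockaddr_in_vnet(struct socket *so, struct sockaddr **sap)
{
	int error;

	/*
	 * pru_sockaddr() may touch per-vnet state, so pin the socket's
	 * vnet for the duration of the call and restore the previous
	 * vnet afterwards.
	 */
	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, sap);
	CURVNET_RESTORE();
	return (error);
}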
*/ enum clnt_stat clnt_call_private( CLIENT *cl, /* client handle */ struct rpc_callextra *ext, /* call metadata */ rpcproc_t proc, /* procedure number */ xdrproc_t xargs, /* xdr routine for args */ void *argsp, /* pointer to args */ xdrproc_t xresults, /* xdr routine for results */ void *resultsp, /* pointer to results */ struct timeval utimeout) /* seconds to wait before giving up */ { XDR xdrs; struct mbuf *mreq; struct mbuf *mrep; enum clnt_stat stat; mreq = m_getcl(M_WAITOK, MT_DATA, 0); xdrmbuf_create(&xdrs, mreq, XDR_ENCODE); if (!xargs(&xdrs, argsp)) { m_freem(mreq); return (RPC_CANTENCODEARGS); } XDR_DESTROY(&xdrs); stat = CLNT_CALL_MBUF(cl, ext, proc, mreq, &mrep, utimeout); m_freem(mreq); if (stat == RPC_SUCCESS) { xdrmbuf_create(&xdrs, mrep, XDR_DECODE); if (!xresults(&xdrs, resultsp)) { XDR_DESTROY(&xdrs); return (RPC_CANTDECODERES); } XDR_DESTROY(&xdrs); } return (stat); } /* * Bind a socket to a privileged IP port */ int bindresvport(struct socket *so, struct sockaddr *sa) { int old, error, af; bool_t freesa = FALSE; struct sockaddr_in *sin; #ifdef INET6 struct sockaddr_in6 *sin6; #endif struct sockopt opt; int proto, portrange, portlow; u_int16_t *portp; socklen_t salen; if (sa == NULL) { + CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); + CURVNET_RESTORE(); if (error) return (error); freesa = TRUE; af = sa->sa_family; salen = sa->sa_len; memset(sa, 0, sa->sa_len); } else { af = sa->sa_family; salen = sa->sa_len; } switch (af) { case AF_INET: proto = IPPROTO_IP; portrange = IP_PORTRANGE; portlow = IP_PORTRANGE_LOW; sin = (struct sockaddr_in *)sa; portp = &sin->sin_port; break; #ifdef INET6 case AF_INET6: proto = IPPROTO_IPV6; portrange = IPV6_PORTRANGE; portlow = IPV6_PORTRANGE_LOW; sin6 = (struct sockaddr_in6 *)sa; portp = &sin6->sin6_port; break; #endif default: return (EPFNOSUPPORT); } sa->sa_family = af; sa->sa_len = salen; if (*portp == 0) { bzero(&opt, sizeof(opt)); opt.sopt_dir = SOPT_GET; opt.sopt_level = proto; opt.sopt_name = portrange; opt.sopt_val = &old; opt.sopt_valsize = sizeof(old); error = sogetopt(so, &opt); if (error) { goto out; } opt.sopt_dir = SOPT_SET; opt.sopt_val = &portlow; error = sosetopt(so, &opt); if (error) goto out; } error = sobind(so, sa, curthread); if (*portp == 0) { if (error) { opt.sopt_dir = SOPT_SET; opt.sopt_val = &old; sosetopt(so, &opt); } } out: if (freesa) free(sa, M_SONAME); return (error); } /* * Kernel module glue */ static int krpc_modevent(module_t mod, int type, void *data) { return (0); } static moduledata_t krpc_mod = { "krpc", krpc_modevent, NULL, }; DECLARE_MODULE(krpc, krpc_mod, SI_SUB_VFS, SI_ORDER_ANY); /* So that loader and kldload(2) can find us, wherever we are.. */ MODULE_VERSION(krpc, 1); Index: head/sys/rpc/svc_vc.c =================================================================== --- head/sys/rpc/svc_vc.c (revision 286893) +++ head/sys/rpc/svc_vc.c (revision 286894) @@ -1,979 +1,985 @@ /* $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $ */ /*- * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 
* - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)svc_tcp.c 2.2 88/08/01 4.0 RPCSRC"; #endif #include __FBSDID("$FreeBSD$"); /* * svc_vc.c, Server side for Connection Oriented based RPC. * * Actually implements two flavors of transporter - * a tcp rendezvouser (a listner and connection establisher) * and a record/tcp stream. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *); static void svc_vc_rendezvous_destroy(SVCXPRT *); static bool_t svc_vc_null(void); static void svc_vc_destroy(SVCXPRT *); static enum xprt_stat svc_vc_stat(SVCXPRT *); static bool_t svc_vc_ack(SVCXPRT *, uint32_t *); static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *seq); static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in); static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq, void *in); static void svc_vc_backchannel_destroy(SVCXPRT *); static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *); static bool_t svc_vc_backchannel_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_vc_backchannel_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *); static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in); static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr); static int svc_vc_accept(struct socket *head, struct socket **sop); static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag); static struct xp_ops svc_vc_rendezvous_ops = { .xp_recv = svc_vc_rendezvous_recv, .xp_stat = svc_vc_rendezvous_stat, .xp_reply = (bool_t (*)(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *))svc_vc_null, .xp_destroy = svc_vc_rendezvous_destroy, .xp_control = svc_vc_rendezvous_control }; static struct xp_ops svc_vc_ops = { .xp_recv = svc_vc_recv, .xp_stat = 
svc_vc_stat, .xp_ack = svc_vc_ack, .xp_reply = svc_vc_reply, .xp_destroy = svc_vc_destroy, .xp_control = svc_vc_control }; static struct xp_ops svc_vc_backchannel_ops = { .xp_recv = svc_vc_backchannel_recv, .xp_stat = svc_vc_backchannel_stat, .xp_reply = svc_vc_backchannel_reply, .xp_destroy = svc_vc_backchannel_destroy, .xp_control = svc_vc_backchannel_control }; /* * Usage: * xprt = svc_vc_create(sock, send_buf_size, recv_buf_size); * * Creates, registers, and returns a (rpc) tcp based transporter. * Once *xprt is initialized, it is registered as a transporter * see (svc.h, xprt_register). This routine returns * a NULL if a problem occurred. * * The filedescriptor passed in is expected to refer to a bound, but * not yet connected socket. * * Since streams do buffered io similar to stdio, the caller can specify * how big the send and receive buffers are via the second and third parms; * 0 => use the system default. */ SVCXPRT * svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize, size_t recvsize) { SVCXPRT *xprt = NULL; struct sockaddr* sa; int error; SOCK_LOCK(so); if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) { SOCK_UNLOCK(so); + CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa); + CURVNET_RESTORE(); if (error) return (NULL); xprt = svc_vc_create_conn(pool, so, sa); free(sa, M_SONAME); return (xprt); } SOCK_UNLOCK(so); xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = NULL; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_rendezvous_ops; + CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); + CURVNET_RESTORE(); if (error) { goto cleanup_svc_vc_create; } memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); free(sa, M_SONAME); xprt_register(xprt); solisten(so, -1, curthread); SOCKBUF_LOCK(&so->so_rcv); xprt->xp_upcallset = 1; soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt); SOCKBUF_UNLOCK(&so->so_rcv); return (xprt); cleanup_svc_vc_create: if (xprt) { sx_destroy(&xprt->xp_lock); svc_xprt_free(xprt); } return (NULL); } /* * Create a new transport for a socket optained via soaccept(). */ SVCXPRT * svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr) { SVCXPRT *xprt = NULL; struct cf_conn *cd = NULL; struct sockaddr* sa = NULL; struct sockopt opt; int one = 1; int error; bzero(&opt, sizeof(struct sockopt)); opt.sopt_dir = SOPT_SET; opt.sopt_level = SOL_SOCKET; opt.sopt_name = SO_KEEPALIVE; opt.sopt_val = &one; opt.sopt_valsize = sizeof(one); error = sosetopt(so, &opt); if (error) { return (NULL); } if (so->so_proto->pr_protocol == IPPROTO_TCP) { bzero(&opt, sizeof(struct sockopt)); opt.sopt_dir = SOPT_SET; opt.sopt_level = IPPROTO_TCP; opt.sopt_name = TCP_NODELAY; opt.sopt_val = &one; opt.sopt_valsize = sizeof(one); error = sosetopt(so, &opt); if (error) { return (NULL); } } cd = mem_alloc(sizeof(*cd)); cd->strm_stat = XPRT_IDLE; xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = cd; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_ops; /* * See http://www.connectathon.org/talks96/nfstcp.pdf - client * has a 5 minute timer, server has a 6 minute timer. 
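/*
 * Editor's aside, not part of this change: a userland counterpart of the
 * two sosetopt() calls svc_vc_create_conn() makes above -- enable
 * SO_KEEPALIVE on the accepted socket and, for TCP, set TCP_NODELAY so
 * small RPC replies are not delayed by Nagle.  rpc_conn_setopts() is an
 * illustrative name only.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int
rpc_conn_setopts(int fd)
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one)) == -1)
		return (-1);
	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) == -1)
		return (-1);
	return (0);
}

int
main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	return (fd != -1 && rpc_conn_setopts(fd) == 0 ? 0 : 1);
}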
*/ xprt->xp_idletimeout = 6 * 60; memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len); + CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); + CURVNET_RESTORE(); if (error) goto cleanup_svc_vc_create; memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); free(sa, M_SONAME); xprt_register(xprt); SOCKBUF_LOCK(&so->so_rcv); xprt->xp_upcallset = 1; soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt); SOCKBUF_UNLOCK(&so->so_rcv); /* * Throw the transport into the active list in case it already * has some data buffered. */ sx_xlock(&xprt->xp_lock); xprt_active(xprt); sx_xunlock(&xprt->xp_lock); return (xprt); cleanup_svc_vc_create: if (xprt) { sx_destroy(&xprt->xp_lock); svc_xprt_free(xprt); } if (cd) mem_free(cd, sizeof(*cd)); return (NULL); } /* * Create a new transport for a backchannel on a clnt_vc socket. */ SVCXPRT * svc_vc_create_backchannel(SVCPOOL *pool) { SVCXPRT *xprt = NULL; struct cf_conn *cd = NULL; cd = mem_alloc(sizeof(*cd)); cd->strm_stat = XPRT_IDLE; xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = NULL; xprt->xp_p1 = cd; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_backchannel_ops; return (xprt); } /* * This does all of the accept except the final call to soaccept. The * caller will call soaccept after dropping its locks (soaccept may * call malloc). */ int svc_vc_accept(struct socket *head, struct socket **sop) { int error = 0; struct socket *so; if ((head->so_options & SO_ACCEPTCONN) == 0) { error = EINVAL; goto done; } #ifdef MAC error = mac_socket_check_accept(curthread->td_ucred, head); if (error != 0) goto done; #endif ACCEPT_LOCK(); if (TAILQ_EMPTY(&head->so_comp)) { ACCEPT_UNLOCK(); error = EWOULDBLOCK; goto done; } so = TAILQ_FIRST(&head->so_comp); KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP")); KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP")); /* * Before changing the flags on the socket, we have to bump the * reference count. Otherwise, if the protocol calls sofree(), * the socket will be released due to a zero refcount. * XXX might not need soref() since this is simpler than kern_accept. */ SOCK_LOCK(so); /* soref() and so_state update */ soref(so); /* file descriptor reference */ TAILQ_REMOVE(&head->so_comp, so, so_list); head->so_qlen--; so->so_state |= (head->so_state & SS_NBIO); so->so_qstate &= ~SQ_COMP; so->so_head = NULL; SOCK_UNLOCK(so); ACCEPT_UNLOCK(); *sop = so; /* connection has been removed from the listen queue */ KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0); done: return (error); } /*ARGSUSED*/ static bool_t svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct socket *so = NULL; struct sockaddr *sa = NULL; int error; SVCXPRT *new_xprt; /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to accept a * connection from the socket and turn it into a new * transport. If the accept fails, we have drained all pending * connections so we call xprt_inactive(). */ sx_xlock(&xprt->xp_lock); error = svc_vc_accept(xprt->xp_socket, &so); if (error == EWOULDBLOCK) { /* * We must re-test for new connections after taking * the lock to protect us in the case where a new * connection arrives after our call to accept fails * with EWOULDBLOCK. 
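/*
 * Editor's aside, not part of this change: a userland sketch of the
 * "accept until the queue is drained" behaviour that svc_vc_accept() and
 * svc_vc_rendezvous_recv() implement in the kernel.  It assumes a listening
 * socket that was made non-blocking, so accept() fails with EWOULDBLOCK
 * once no completed connections remain.  drain_listen_queue() is an
 * illustrative name only.
 */
#include <sys/socket.h>
#include <errno.h>
#include <unistd.h>

void
drain_listen_queue(int lfd)
{
	int cfd;

	for (;;) {
		cfd = accept(lfd, NULL, NULL);
		if (cfd == -1) {
			if (errno == EWOULDBLOCK || errno == EAGAIN)
				break;		/* queue drained */
			if (errno == EINTR)
				continue;
			break;			/* hard error */
		}
		/* hand cfd off to a per-connection handler... */
		close(cfd);
	}
}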
*/ ACCEPT_LOCK(); if (TAILQ_EMPTY(&xprt->xp_socket->so_comp)) xprt_inactive_self(xprt); ACCEPT_UNLOCK(); sx_xunlock(&xprt->xp_lock); return (FALSE); } if (error) { SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(xprt->xp_socket, SO_RCV); } SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); xprt_inactive_self(xprt); sx_xunlock(&xprt->xp_lock); return (FALSE); } sx_xunlock(&xprt->xp_lock); sa = 0; error = soaccept(so, &sa); if (error) { /* * XXX not sure if I need to call sofree or soclose here. */ if (sa) free(sa, M_SONAME); return (FALSE); } /* * svc_vc_create_conn will call xprt_register - we don't need * to do anything with the new connection except derefence it. */ new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa); if (!new_xprt) { soclose(so); } else { SVC_RELEASE(new_xprt); } free(sa, M_SONAME); return (FALSE); /* there is never an rpc msg to be processed */ } /*ARGSUSED*/ static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *xprt) { return (XPRT_IDLE); } static void svc_vc_destroy_common(SVCXPRT *xprt) { SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(xprt->xp_socket, SO_RCV); } SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); if (xprt->xp_socket) (void)soclose(xprt->xp_socket); if (xprt->xp_netid) (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1); svc_xprt_free(xprt); } static void svc_vc_rendezvous_destroy(SVCXPRT *xprt) { svc_vc_destroy_common(xprt); } static void svc_vc_destroy(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1; svc_vc_destroy_common(xprt); if (cd->mreq) m_freem(cd->mreq); if (cd->mpending) m_freem(cd->mpending); mem_free(cd, sizeof(*cd)); } static void svc_vc_backchannel_destroy(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1; struct mbuf *m, *m2; svc_xprt_free(xprt); m = cd->mreq; while (m != NULL) { m2 = m; m = m->m_nextpkt; m_freem(m2); } mem_free(cd, sizeof(*cd)); } /*ARGSUSED*/ static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static bool_t svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static enum xprt_stat svc_vc_stat(SVCXPRT *xprt) { struct cf_conn *cd; cd = (struct cf_conn *)(xprt->xp_p1); if (cd->strm_stat == XPRT_DIED) return (XPRT_DIED); if (cd->mreq != NULL && cd->resid == 0 && cd->eor) return (XPRT_MOREREQS); if (soreadable(xprt->xp_socket)) return (XPRT_MOREREQS); return (XPRT_IDLE); } static bool_t svc_vc_ack(SVCXPRT *xprt, uint32_t *ack) { *ack = atomic_load_acq_32(&xprt->xp_snt_cnt); *ack -= sbused(&xprt->xp_socket->so_snd); return (TRUE); } static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *xprt) { struct cf_conn *cd; cd = (struct cf_conn *)(xprt->xp_p1); if (cd->mreq != NULL) return (XPRT_MOREREQS); return (XPRT_IDLE); } /* * If we have an mbuf chain in cd->mpending, try to parse a record from it, * leaving the result in cd->mreq. If we don't have a complete record, leave * the partial result in cd->mreq and try to read more from the socket. */ static int svc_vc_process_pending(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct socket *so = xprt->xp_socket; struct mbuf *m; /* * If cd->resid is non-zero, we have part of the * record already, otherwise we are expecting a record * marker. 
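/*
 * Editor's aside, not part of this change: the TCP record-marking header
 * that svc_vc_process_pending() parses below, decoded in plain userland C.
 * The marker is a 4-byte big-endian word: the top bit flags the last
 * fragment of a record, the low 31 bits carry the fragment length.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const unsigned char wire[4] = { 0x80, 0x00, 0x01, 0x00 };
	uint32_t header, fraglen;
	int last_frag;

	memcpy(&header, wire, sizeof(header));
	header = ntohl(header);
	last_frag = (header & 0x80000000) != 0;
	fraglen = header & 0x7fffffff;
	printf("last=%d len=%u\n", last_frag, fraglen);	/* last=1 len=256 */
	return (0);
}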
*/ if (!cd->resid && cd->mpending) { /* * See if there is enough data buffered to * make up a record marker. Make sure we can * handle the case where the record marker is * split across more than one mbuf. */ size_t n = 0; uint32_t header; m = cd->mpending; while (n < sizeof(uint32_t) && m) { n += m->m_len; m = m->m_next; } if (n < sizeof(uint32_t)) { so->so_rcv.sb_lowat = sizeof(uint32_t) - n; return (FALSE); } m_copydata(cd->mpending, 0, sizeof(header), (char *)&header); header = ntohl(header); cd->eor = (header & 0x80000000) != 0; cd->resid = header & 0x7fffffff; m_adj(cd->mpending, sizeof(uint32_t)); } /* * Start pulling off mbufs from cd->mpending * until we either have a complete record or * we run out of data. We use m_split to pull * data - it will pull as much as possible and * split the last mbuf if necessary. */ while (cd->mpending && cd->resid) { m = cd->mpending; if (cd->mpending->m_next || cd->mpending->m_len > cd->resid) cd->mpending = m_split(cd->mpending, cd->resid, M_WAITOK); else cd->mpending = NULL; if (cd->mreq) m_last(cd->mreq)->m_next = m; else cd->mreq = m; while (m) { cd->resid -= m->m_len; m = m->m_next; } } /* * Block receive upcalls if we have more data pending, * otherwise report our need. */ if (cd->mpending) so->so_rcv.sb_lowat = INT_MAX; else so->so_rcv.sb_lowat = imax(1, imin(cd->resid, so->so_rcv.sb_hiwat / 2)); return (TRUE); } static bool_t svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct uio uio; struct mbuf *m; struct socket* so = xprt->xp_socket; XDR xdrs; int error, rcvflag; uint32_t xid_plus_direction[2]; /* * Serialise access to the socket and our own record parsing * state. */ sx_xlock(&xprt->xp_lock); for (;;) { /* If we have no request ready, check pending queue. */ while (cd->mpending && (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) { if (!svc_vc_process_pending(xprt)) break; } /* Process and return complete request in cd->mreq. */ if (cd->mreq != NULL && cd->resid == 0 && cd->eor) { /* * Now, check for a backchannel reply. * The XID is in the first uint32_t of the reply * and the message direction is the second one. */ if ((cd->mreq->m_len >= sizeof(xid_plus_direction) || m_length(cd->mreq, NULL) >= sizeof(xid_plus_direction)) && xprt->xp_p2 != NULL) { m_copydata(cd->mreq, 0, sizeof(xid_plus_direction), (char *)xid_plus_direction); xid_plus_direction[0] = ntohl(xid_plus_direction[0]); xid_plus_direction[1] = ntohl(xid_plus_direction[1]); /* Check message direction. */ if (xid_plus_direction[1] == REPLY) { clnt_bck_svccall(xprt->xp_p2, cd->mreq, xid_plus_direction[0]); cd->mreq = NULL; continue; } } xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE); cd->mreq = NULL; /* Check for next request in a pending queue. */ svc_vc_process_pending(xprt); if (cd->mreq == NULL || cd->resid != 0) { SOCKBUF_LOCK(&so->so_rcv); if (!soreadable(so)) xprt_inactive_self(xprt); SOCKBUF_UNLOCK(&so->so_rcv); } sx_xunlock(&xprt->xp_lock); if (! xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = NULL; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to * read as much as possible from the socket and put * the result in cd->mpending. If the read fails, * we have drained both cd->mpending and the socket so * we can call xprt_inactive(). 
*/ uio.uio_resid = 1000000000; uio.uio_td = curthread; m = NULL; rcvflag = MSG_DONTWAIT; error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag); if (error == EWOULDBLOCK) { /* * We must re-test for readability after * taking the lock to protect us in the case * where a new packet arrives on the socket * after our call to soreceive fails with * EWOULDBLOCK. */ SOCKBUF_LOCK(&so->so_rcv); if (!soreadable(so)) xprt_inactive_self(xprt); SOCKBUF_UNLOCK(&so->so_rcv); sx_xunlock(&xprt->xp_lock); return (FALSE); } if (error) { SOCKBUF_LOCK(&so->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(so, SO_RCV); } SOCKBUF_UNLOCK(&so->so_rcv); xprt_inactive_self(xprt); cd->strm_stat = XPRT_DIED; sx_xunlock(&xprt->xp_lock); return (FALSE); } if (!m) { /* * EOF - the other end has closed the socket. */ xprt_inactive_self(xprt); cd->strm_stat = XPRT_DIED; sx_xunlock(&xprt->xp_lock); return (FALSE); } if (cd->mpending) m_last(cd->mpending)->m_next = m; else cd->mpending = m; } } static bool_t svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct ct_data *ct; struct mbuf *m; XDR xdrs; sx_xlock(&xprt->xp_lock); ct = (struct ct_data *)xprt->xp_p2; if (ct == NULL) { sx_xunlock(&xprt->xp_lock); return (FALSE); } mtx_lock(&ct->ct_lock); m = cd->mreq; if (m == NULL) { xprt_inactive_self(xprt); mtx_unlock(&ct->ct_lock); sx_xunlock(&xprt->xp_lock); return (FALSE); } cd->mreq = m->m_nextpkt; mtx_unlock(&ct->ct_lock); sx_xunlock(&xprt->xp_lock); xdrmbuf_create(&xdrs, m, XDR_DECODE); if (! xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = NULL; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } static bool_t svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr *addr, struct mbuf *m, uint32_t *seq) { XDR xdrs; struct mbuf *mrep; bool_t stat = TRUE; int error, len; /* * Leave space for record mark. */ mrep = m_gethdr(M_WAITOK, MT_DATA); mrep->m_data += sizeof(uint32_t); xdrmbuf_create(&xdrs, mrep, XDR_ENCODE); if (msg->rm_reply.rp_stat == MSG_ACCEPTED && msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { if (!xdr_replymsg(&xdrs, msg)) stat = FALSE; else xdrmbuf_append(&xdrs, m); } else { stat = xdr_replymsg(&xdrs, msg); } if (stat) { m_fixhdr(mrep); /* * Prepend a record marker containing the reply length. */ M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK); len = mrep->m_pkthdr.len; *mtod(mrep, uint32_t *) = htonl(0x80000000 | (len - sizeof(uint32_t))); atomic_add_32(&xprt->xp_snd_cnt, len); error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL, 0, curthread); if (!error) { atomic_add_rel_32(&xprt->xp_snt_cnt, len); if (seq) *seq = xprt->xp_snd_cnt; stat = TRUE; } else atomic_subtract_32(&xprt->xp_snd_cnt, len); } else { m_freem(mrep); } XDR_DESTROY(&xdrs); return (stat); } static bool_t svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr *addr, struct mbuf *m, uint32_t *seq) { struct ct_data *ct; XDR xdrs; struct mbuf *mrep; bool_t stat = TRUE; int error; /* * Leave space for record mark. */ mrep = m_gethdr(M_WAITOK, MT_DATA); mrep->m_data += sizeof(uint32_t); xdrmbuf_create(&xdrs, mrep, XDR_ENCODE); if (msg->rm_reply.rp_stat == MSG_ACCEPTED && msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { if (!xdr_replymsg(&xdrs, msg)) stat = FALSE; else xdrmbuf_append(&xdrs, m); } else { stat = xdr_replymsg(&xdrs, msg); } if (stat) { m_fixhdr(mrep); /* * Prepend a record marker containing the reply length. 
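/*
 * Editor's aside, not part of this change: composing the reply-side record
 * marker, as svc_vc_reply() does above with
 * htonl(0x80000000 | (len - sizeof(uint32_t))).  Here the reply body is a
 * plain byte count instead of an mbuf chain; put_record_mark() is an
 * illustrative name only.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write a last-fragment record marker for a body of 'bodylen' bytes. */
static void
put_record_mark(unsigned char *hdr, uint32_t bodylen)
{
	uint32_t mark = htonl(0x80000000 | bodylen);

	memcpy(hdr, &mark, sizeof(mark));
}

int
main(void)
{
	unsigned char hdr[4];

	put_record_mark(hdr, 256);
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return (0);
}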
 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		*mtod(mrep, uint32_t *) =
		    htonl(0x80000000 | (mrep->m_pkthdr.len
			- sizeof(uint32_t)));
		sx_xlock(&xprt->xp_lock);
		ct = (struct ct_data *)xprt->xp_p2;
		if (ct != NULL)
			error = sosend(ct->ct_socket, NULL, NULL, mrep, NULL,
			    0, curthread);
		else
			error = EPIPE;
		sx_xunlock(&xprt->xp_lock);
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}

static bool_t
svc_vc_null()
{

	return (FALSE);
}

static int
svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	if (soreadable(xprt->xp_socket))
		xprt_active(xprt);
	return (SU_OK);
}

#if 0
/*
 * Get the effective UID of the sending process. Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid)
{
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr;
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		return (-1);
}
#endif
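/*
 * Editor's aside, not part of this change: a userland version of the
 * credential check sketched in the "#if 0" __rpc_get_local_uid() block
 * above, using getpeereid(2) on a connected AF_LOCAL socket.
 * local_peer_uid() is an illustrative name only.
 */
#include <sys/types.h>
#include <unistd.h>

int
local_peer_uid(int fd, uid_t *uidp)
{
	uid_t euid;
	gid_t egid;

	if (getpeereid(fd, &euid, &egid) == -1)
		return (-1);
	*uidp = euid;
	return (0);
}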