Index: head/sys/rpc/svc.c =================================================================== --- head/sys/rpc/svc.c (revision 280929) +++ head/sys/rpc/svc.c (revision 280930) @@ -1,1435 +1,1435 @@ /* $NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $ */ /*- * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)svc.c 2.4 88/08/11 4.0 RPCSRC"; #endif #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); /* * svc.c, Server-side remote procedure call interface. * * There are two sets of procedures here. The xprt routines are * for handling transport handles. The svc routines handle the * list of service routines. * * Copyright (C) 1984, Sun Microsystems, Inc.
*/ #include <sys/param.h> #include <sys/lock.h> #include <sys/kernel.h> #include <sys/kthread.h> #include <sys/malloc.h> #include <sys/mbuf.h> #include <sys/mutex.h> #include <sys/proc.h> #include <sys/queue.h> #include <sys/socketvar.h> #include <sys/systm.h> #include <sys/smp.h> #include <sys/sx.h> #include <sys/ucred.h> #include <rpc/rpc.h> #include <rpc/rpcb_clnt.h> #include <rpc/replay.h> #include <rpc/rpc_com.h> #define SVC_VERSQUIET 0x0001 /* keep quiet about vers mismatch */ #define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET) static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t, char *); static void svc_new_thread(SVCGROUP *grp); static void xprt_unregister_locked(SVCXPRT *xprt); -static void svc_change_space_used(SVCPOOL *pool, int delta); +static void svc_change_space_used(SVCPOOL *pool, long delta); static bool_t svc_request_space_available(SVCPOOL *pool); /* *************** SVCXPRT related stuff **************** */ static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS); static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS); static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS); SVCPOOL* svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base) { SVCPOOL *pool; SVCGROUP *grp; int g; pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO); mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF); pool->sp_name = name; pool->sp_state = SVCPOOL_INIT; pool->sp_proc = NULL; TAILQ_INIT(&pool->sp_callouts); TAILQ_INIT(&pool->sp_lcallouts); pool->sp_minthreads = 1; pool->sp_maxthreads = 1; pool->sp_groupcount = 1; for (g = 0; g < SVC_MAXGROUPS; g++) { grp = &pool->sp_groups[g]; mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF); grp->sg_pool = pool; grp->sg_state = SVCPOOL_ACTIVE; TAILQ_INIT(&grp->sg_xlist); TAILQ_INIT(&grp->sg_active); LIST_INIT(&grp->sg_idlethreads); grp->sg_minthreads = 1; grp->sg_maxthreads = 1; } /* - * Don't use more than a quarter of mbuf clusters or more than - * 45Mb buffering requests. + * Don't use more than a quarter of mbuf clusters. Nota bene: + * nmbclusters is an int, but nmbclusters*MCLBYTES may overflow + * on LP64 architectures, so cast to u_long to avoid undefined + * behavior. (ILP32 architectures cannot have nmbclusters + * large enough to overflow for other reasons.)
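 *
 * Editorial worked example (not part of this change; the value is
 * hypothetical): with nmbclusters = 1048576 (2^20, plausible on a
 * large-memory machine) and MCLBYTES = 2048, the 32-bit int product
 * nmbclusters * MCLBYTES = 2^31 already exceeds INT_MAX, while
 * (u_long)nmbclusters * MCLBYTES / 4 = 536870912 (512MB) is
 * computed safely on LP64.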
*/ - pool->sp_space_high = nmbclusters * MCLBYTES / 4; - if (pool->sp_space_high > 45 << 20) - pool->sp_space_high = 45 << 20; - pool->sp_space_low = 2 * pool->sp_space_high / 3; + pool->sp_space_high = (u_long)nmbclusters * MCLBYTES / 4; + pool->sp_space_low = (pool->sp_space_high / 3) * 2; sysctl_ctx_init(&pool->sp_sysctl); if (sysctl_base) { SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO, "minthreads", CTLTYPE_INT | CTLFLAG_RW, pool, 0, svcpool_minthread_sysctl, "I", "Minimal number of threads"); SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO, "maxthreads", CTLTYPE_INT | CTLFLAG_RW, pool, 0, svcpool_maxthread_sysctl, "I", "Maximal number of threads"); SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO, "threads", CTLTYPE_INT | CTLFLAG_RD, pool, 0, svcpool_threads_sysctl, "I", "Current number of threads"); SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO, "groups", CTLFLAG_RD, &pool->sp_groupcount, 0, "Number of thread groups"); - SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO, + SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO, "request_space_used", CTLFLAG_RD, - &pool->sp_space_used, 0, + &pool->sp_space_used, "Space in parsed but not handled requests."); - SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO, + SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO, "request_space_used_highest", CTLFLAG_RD, - &pool->sp_space_used_highest, 0, + &pool->sp_space_used_highest, "Highest space used since reboot."); - SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO, + SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO, "request_space_high", CTLFLAG_RW, - &pool->sp_space_high, 0, + &pool->sp_space_high, "Maximum space in parsed but not handled requests."); - SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO, + SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO, "request_space_low", CTLFLAG_RW, - &pool->sp_space_low, 0, + &pool->sp_space_low, "Low water mark for request space."); SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO, "request_space_throttled", CTLFLAG_RD, &pool->sp_space_throttled, 0, "Whether nfs requests are currently throttled"); SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO, "request_space_throttle_count", CTLFLAG_RD, &pool->sp_space_throttle_count, 0, "Count of times throttling based on request space has occurred"); } return pool; } void svcpool_destroy(SVCPOOL *pool) { SVCGROUP *grp; SVCXPRT *xprt, *nxprt; struct svc_callout *s; struct svc_loss_callout *sl; struct svcxprt_list cleanup; int g; TAILQ_INIT(&cleanup); for (g = 0; g < SVC_MAXGROUPS; g++) { grp = &pool->sp_groups[g]; mtx_lock(&grp->sg_lock); while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) { xprt_unregister_locked(xprt); TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link); } mtx_unlock(&grp->sg_lock); } TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) { SVC_RELEASE(xprt); } mtx_lock(&pool->sp_lock); while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) { mtx_unlock(&pool->sp_lock); svc_unreg(pool, s->sc_prog, s->sc_vers); mtx_lock(&pool->sp_lock); } while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) { mtx_unlock(&pool->sp_lock); svc_loss_unreg(pool, sl->slc_dispatch); mtx_lock(&pool->sp_lock); } mtx_unlock(&pool->sp_lock); for (g = 0; g < SVC_MAXGROUPS; g++) { grp = &pool->sp_groups[g]; mtx_destroy(&grp->sg_lock); } mtx_destroy(&pool->sp_lock); if (pool->sp_rcache) replay_freecache(pool->sp_rcache); sysctl_ctx_free(&pool->sp_sysctl); free(pool, M_RPC); } /* * Sysctl handler to get the present thread count on a pool */ 
static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS) { SVCPOOL *pool; int threads, error, g; pool = oidp->oid_arg1; threads = 0; mtx_lock(&pool->sp_lock); for (g = 0; g < pool->sp_groupcount; g++) threads += pool->sp_groups[g].sg_threadcount; mtx_unlock(&pool->sp_lock); error = sysctl_handle_int(oidp, &threads, 0, req); return (error); } /* * Sysctl handler to set the minimum thread count on a pool */ static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS) { SVCPOOL *pool; int newminthreads, error, g; pool = oidp->oid_arg1; newminthreads = pool->sp_minthreads; error = sysctl_handle_int(oidp, &newminthreads, 0, req); if (error == 0 && newminthreads != pool->sp_minthreads) { if (newminthreads > pool->sp_maxthreads) return (EINVAL); mtx_lock(&pool->sp_lock); pool->sp_minthreads = newminthreads; for (g = 0; g < pool->sp_groupcount; g++) { pool->sp_groups[g].sg_minthreads = max(1, pool->sp_minthreads / pool->sp_groupcount); } mtx_unlock(&pool->sp_lock); } return (error); } /* * Sysctl handler to set the maximum thread count on a pool */ static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS) { SVCPOOL *pool; int newmaxthreads, error, g; pool = oidp->oid_arg1; newmaxthreads = pool->sp_maxthreads; error = sysctl_handle_int(oidp, &newmaxthreads, 0, req); if (error == 0 && newmaxthreads != pool->sp_maxthreads) { if (newmaxthreads < pool->sp_minthreads) return (EINVAL); mtx_lock(&pool->sp_lock); pool->sp_maxthreads = newmaxthreads; for (g = 0; g < pool->sp_groupcount; g++) { pool->sp_groups[g].sg_maxthreads = max(1, pool->sp_maxthreads / pool->sp_groupcount); } mtx_unlock(&pool->sp_lock); } return (error); } /* * Activate a transport handle. */ void xprt_register(SVCXPRT *xprt) { SVCPOOL *pool = xprt->xp_pool; SVCGROUP *grp; int g; SVC_ACQUIRE(xprt); g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount; xprt->xp_group = grp = &pool->sp_groups[g]; mtx_lock(&grp->sg_lock); xprt->xp_registered = TRUE; xprt->xp_active = FALSE; TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link); mtx_unlock(&grp->sg_lock); } /* * De-activate a transport handle. Note: the locked version doesn't * release the transport - caller must do that after dropping the pool * lock. */ static void xprt_unregister_locked(SVCXPRT *xprt) { SVCGROUP *grp = xprt->xp_group; mtx_assert(&grp->sg_lock, MA_OWNED); KASSERT(xprt->xp_registered == TRUE, ("xprt_unregister_locked: not registered")); xprt_inactive_locked(xprt); TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link); xprt->xp_registered = FALSE; } void xprt_unregister(SVCXPRT *xprt) { SVCGROUP *grp = xprt->xp_group; mtx_lock(&grp->sg_lock); if (xprt->xp_registered == FALSE) { /* Already unregistered by another thread */ mtx_unlock(&grp->sg_lock); return; } xprt_unregister_locked(xprt); mtx_unlock(&grp->sg_lock); SVC_RELEASE(xprt); } /* * Attempt to assign a service thread to this transport. */ static int xprt_assignthread(SVCXPRT *xprt) { SVCGROUP *grp = xprt->xp_group; SVCTHREAD *st; mtx_assert(&grp->sg_lock, MA_OWNED); st = LIST_FIRST(&grp->sg_idlethreads); if (st) { LIST_REMOVE(st, st_ilink); SVC_ACQUIRE(xprt); xprt->xp_thread = st; st->st_xprt = xprt; cv_signal(&st->st_cond); return (TRUE); } else { /* * See if we can create a new thread. The * actual thread creation happens in * svc_run_internal because our locking state * is poorly defined (we are typically called * from a socket upcall). Don't create more * than one thread per second. 
*/ if (grp->sg_state == SVCPOOL_ACTIVE && grp->sg_lastcreatetime < time_uptime && grp->sg_threadcount < grp->sg_maxthreads) { grp->sg_state = SVCPOOL_THREADWANTED; } } return (FALSE); } void xprt_active(SVCXPRT *xprt) { SVCGROUP *grp = xprt->xp_group; mtx_lock(&grp->sg_lock); if (!xprt->xp_registered) { /* * Race with xprt_unregister - we lose. */ mtx_unlock(&grp->sg_lock); return; } if (!xprt->xp_active) { xprt->xp_active = TRUE; if (xprt->xp_thread == NULL) { if (!svc_request_space_available(xprt->xp_pool) || !xprt_assignthread(xprt)) TAILQ_INSERT_TAIL(&grp->sg_active, xprt, xp_alink); } } mtx_unlock(&grp->sg_lock); } void xprt_inactive_locked(SVCXPRT *xprt) { SVCGROUP *grp = xprt->xp_group; mtx_assert(&grp->sg_lock, MA_OWNED); if (xprt->xp_active) { if (xprt->xp_thread == NULL) TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink); xprt->xp_active = FALSE; } } void xprt_inactive(SVCXPRT *xprt) { SVCGROUP *grp = xprt->xp_group; mtx_lock(&grp->sg_lock); xprt_inactive_locked(xprt); mtx_unlock(&grp->sg_lock); } /* * Variant of xprt_inactive() for use only when sure that the port is * assigned to a thread. For example, within receive handlers. */ void xprt_inactive_self(SVCXPRT *xprt) { KASSERT(xprt->xp_thread != NULL, ("xprt_inactive_self(%p) with NULL xp_thread", xprt)); xprt->xp_active = FALSE; } /* * Add a service program to the callout list. * The dispatch routine will be called when an rpc request for this * program number comes in. */ bool_t svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers, void (*dispatch)(struct svc_req *, SVCXPRT *), const struct netconfig *nconf) { SVCPOOL *pool = xprt->xp_pool; struct svc_callout *s; char *netid = NULL; int flag = 0; /* VARIABLES PROTECTED BY svc_lock: s, svc_head */ if (xprt->xp_netid) { netid = strdup(xprt->xp_netid, M_RPC); flag = 1; } else if (nconf && nconf->nc_netid) { netid = strdup(nconf->nc_netid, M_RPC); flag = 1; } /* must have been created with svc_raw_create */ if ((netid == NULL) && (flag == 1)) { return (FALSE); } mtx_lock(&pool->sp_lock); if ((s = svc_find(pool, prog, vers, netid)) != NULL) { if (netid) free(netid, M_RPC); if (s->sc_dispatch == dispatch) goto rpcb_it; /* he is registering another xprt */ mtx_unlock(&pool->sp_lock); return (FALSE); } s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT); if (s == NULL) { if (netid) free(netid, M_RPC); mtx_unlock(&pool->sp_lock); return (FALSE); } s->sc_prog = prog; s->sc_vers = vers; s->sc_dispatch = dispatch; s->sc_netid = netid; TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link); if ((xprt->xp_netid == NULL) && (flag == 1) && netid) ((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC); rpcb_it: mtx_unlock(&pool->sp_lock); /* now register the information with the local binder service */ if (nconf) { bool_t dummy; struct netconfig tnc; struct netbuf nb; tnc = *nconf; nb.buf = &xprt->xp_ltaddr; nb.len = xprt->xp_ltaddr.ss_len; dummy = rpcb_set(prog, vers, &tnc, &nb); return (dummy); } return (TRUE); } /* * Remove a service program from the callout list. */ void svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers) { struct svc_callout *s; /* unregister the information anyway */ (void) rpcb_unset(prog, vers, NULL); mtx_lock(&pool->sp_lock); while ((s = svc_find(pool, prog, vers, NULL)) != NULL) { TAILQ_REMOVE(&pool->sp_callouts, s, sc_link); if (s->sc_netid) mem_free(s->sc_netid, strlen(s->sc_netid) + 1); mem_free(s, sizeof (struct svc_callout)); } mtx_unlock(&pool->sp_lock); }
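/*
 * Editorial sketch (not part of this change): a service typically pairs
 * svc_reg() at setup with svc_unreg() at teardown.  EXPROG, EXVERS and
 * example_dispatch are hypothetical names.
 */
#if 0	/* illustration only */
#define EXPROG	((rpcprog_t)0x20000000)		/* hypothetical program */
#define EXVERS	((rpcvers_t)1)

static void example_dispatch(struct svc_req *, SVCXPRT *);

static bool_t
example_register(SVCXPRT *xprt, struct netconfig *nconf)
{

	/* Adds a callout; also rpcb_set()s the port when nconf != NULL. */
	return (svc_reg(xprt, EXPROG, EXVERS, example_dispatch, nconf));
}

static void
example_unregister(SVCPOOL *pool)
{

	/* Removes every (EXPROG, EXVERS) callout and rpcb_unset()s it. */
	svc_unreg(pool, EXPROG, EXVERS);
}
#endif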
/* * Add a service connection loss program to the callout list. * The dispatch routine will be called when some port in this pool dies. */ bool_t svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *)) { SVCPOOL *pool = xprt->xp_pool; struct svc_loss_callout *s; mtx_lock(&pool->sp_lock); TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) { if (s->slc_dispatch == dispatch) break; } if (s != NULL) { mtx_unlock(&pool->sp_lock); return (TRUE); } s = malloc(sizeof (struct svc_loss_callout), M_RPC, M_NOWAIT); if (s == NULL) { mtx_unlock(&pool->sp_lock); return (FALSE); } s->slc_dispatch = dispatch; TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link); mtx_unlock(&pool->sp_lock); return (TRUE); } /* * Remove a service connection loss program from the callout list. */ void svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *)) { struct svc_loss_callout *s; mtx_lock(&pool->sp_lock); TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) { if (s->slc_dispatch == dispatch) { TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link); free(s, M_RPC); break; } } mtx_unlock(&pool->sp_lock); } /* ********************** CALLOUT list related stuff ************* */ /* * Search the callout list for a program number, return the callout * struct. */ static struct svc_callout * svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid) { struct svc_callout *s; mtx_assert(&pool->sp_lock, MA_OWNED); TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) { if (s->sc_prog == prog && s->sc_vers == vers && (netid == NULL || s->sc_netid == NULL || strcmp(netid, s->sc_netid) == 0)) break; } return (s); } /* ******************* REPLY GENERATION ROUTINES ************ */ static bool_t svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply, struct mbuf *body) { SVCXPRT *xprt = rqstp->rq_xprt; bool_t ok; if (rqstp->rq_args) { m_freem(rqstp->rq_args); rqstp->rq_args = NULL; } if (xprt->xp_pool->sp_rcache) replay_setreply(xprt->xp_pool->sp_rcache, rply, svc_getrpccaller(rqstp), body); if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body)) return (FALSE); ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq); if (rqstp->rq_addr) { free(rqstp->rq_addr, M_SONAME); rqstp->rq_addr = NULL; } return (ok); } /* * Send a reply to an rpc request */ bool_t svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location) { struct rpc_msg rply; struct mbuf *m; XDR xdrs; bool_t ok; rply.rm_xid = rqstp->rq_xid; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = rqstp->rq_verf; rply.acpted_rply.ar_stat = SUCCESS; rply.acpted_rply.ar_results.where = NULL; rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void; m = m_getcl(M_WAITOK, MT_DATA, 0); xdrmbuf_create(&xdrs, m, XDR_ENCODE); ok = xdr_results(&xdrs, xdr_location); XDR_DESTROY(&xdrs); if (ok) { return (svc_sendreply_common(rqstp, &rply, m)); } else { m_freem(m); return (FALSE); } } bool_t svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m) { struct rpc_msg rply; rply.rm_xid = rqstp->rq_xid; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = rqstp->rq_verf; rply.acpted_rply.ar_stat = SUCCESS; rply.acpted_rply.ar_results.where = NULL; rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void; return (svc_sendreply_common(rqstp, &rply, m)); } /* * No procedure error reply */ void svcerr_noproc(struct svc_req *rqstp) { SVCXPRT *xprt = rqstp->rq_xprt; struct rpc_msg rply; rply.rm_xid = rqstp->rq_xid; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = rqstp->rq_verf; rply.acpted_rply.ar_stat = PROC_UNAVAIL; if
(xprt->xp_pool->sp_rcache) replay_setreply(xprt->xp_pool->sp_rcache, &rply, svc_getrpccaller(rqstp), NULL); svc_sendreply_common(rqstp, &rply, NULL); } /* * Can't decode args error reply */ void svcerr_decode(struct svc_req *rqstp) { SVCXPRT *xprt = rqstp->rq_xprt; struct rpc_msg rply; rply.rm_xid = rqstp->rq_xid; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = rqstp->rq_verf; rply.acpted_rply.ar_stat = GARBAGE_ARGS; if (xprt->xp_pool->sp_rcache) replay_setreply(xprt->xp_pool->sp_rcache, &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL); svc_sendreply_common(rqstp, &rply, NULL); } /* * Some system error */ void svcerr_systemerr(struct svc_req *rqstp) { SVCXPRT *xprt = rqstp->rq_xprt; struct rpc_msg rply; rply.rm_xid = rqstp->rq_xid; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = rqstp->rq_verf; rply.acpted_rply.ar_stat = SYSTEM_ERR; if (xprt->xp_pool->sp_rcache) replay_setreply(xprt->xp_pool->sp_rcache, &rply, svc_getrpccaller(rqstp), NULL); svc_sendreply_common(rqstp, &rply, NULL); } /* * Authentication error reply */ void svcerr_auth(struct svc_req *rqstp, enum auth_stat why) { SVCXPRT *xprt = rqstp->rq_xprt; struct rpc_msg rply; rply.rm_xid = rqstp->rq_xid; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_DENIED; rply.rjcted_rply.rj_stat = AUTH_ERROR; rply.rjcted_rply.rj_why = why; if (xprt->xp_pool->sp_rcache) replay_setreply(xprt->xp_pool->sp_rcache, &rply, svc_getrpccaller(rqstp), NULL); svc_sendreply_common(rqstp, &rply, NULL); } /* * Auth too weak error reply */ void svcerr_weakauth(struct svc_req *rqstp) { svcerr_auth(rqstp, AUTH_TOOWEAK); } /* * Program unavailable error reply */ void svcerr_noprog(struct svc_req *rqstp) { SVCXPRT *xprt = rqstp->rq_xprt; struct rpc_msg rply; rply.rm_xid = rqstp->rq_xid; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = rqstp->rq_verf; rply.acpted_rply.ar_stat = PROG_UNAVAIL; if (xprt->xp_pool->sp_rcache) replay_setreply(xprt->xp_pool->sp_rcache, &rply, svc_getrpccaller(rqstp), NULL); svc_sendreply_common(rqstp, &rply, NULL); } /* * Program version mismatch error reply */ void svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers) { SVCXPRT *xprt = rqstp->rq_xprt; struct rpc_msg rply; rply.rm_xid = rqstp->rq_xid; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = rqstp->rq_verf; rply.acpted_rply.ar_stat = PROG_MISMATCH; rply.acpted_rply.ar_vers.low = (uint32_t)low_vers; rply.acpted_rply.ar_vers.high = (uint32_t)high_vers; if (xprt->xp_pool->sp_rcache) replay_setreply(xprt->xp_pool->sp_rcache, &rply, svc_getrpccaller(rqstp), NULL); svc_sendreply_common(rqstp, &rply, NULL); } /* * Allocate a new server transport structure. All fields are * initialized to zero and xp_p3 is initialized to point at an * extension structure to hold various flags and authentication * parameters. */ SVCXPRT * svc_xprt_alloc() { SVCXPRT *xprt; SVCXPRT_EXT *ext; xprt = mem_alloc(sizeof(SVCXPRT)); memset(xprt, 0, sizeof(SVCXPRT)); ext = mem_alloc(sizeof(SVCXPRT_EXT)); memset(ext, 0, sizeof(SVCXPRT_EXT)); xprt->xp_p3 = ext; refcount_init(&xprt->xp_refs, 1); return (xprt); } /* * Free a server transport structure. */ void svc_xprt_free(xprt) SVCXPRT *xprt; { mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT)); mem_free(xprt, sizeof(SVCXPRT)); } /* ******************* SERVER INPUT STUFF ******************* */ /* * Read RPC requests from a transport and queue them to be * executed. 
We handle authentication and replay cache replies here. * Actually dispatching the RPC is deferred till svc_executereq. */ static enum xprt_stat svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret) { SVCPOOL *pool = xprt->xp_pool; struct svc_req *r; struct rpc_msg msg; struct mbuf *args; struct svc_loss_callout *s; enum xprt_stat stat; /* now receive msgs from xprt (support batch calls) */ r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO); msg.rm_call.cb_cred.oa_base = r->rq_credarea; msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES]; r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES]; if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) { enum auth_stat why; /* * Handle replays and authenticate before queuing the * request to be executed. */ SVC_ACQUIRE(xprt); r->rq_xprt = xprt; if (pool->sp_rcache) { struct rpc_msg repmsg; struct mbuf *repbody; enum replay_state rs; rs = replay_find(pool->sp_rcache, &msg, svc_getrpccaller(r), &repmsg, &repbody); switch (rs) { case RS_NEW: break; case RS_DONE: SVC_REPLY(xprt, &repmsg, r->rq_addr, repbody, &r->rq_reply_seq); if (r->rq_addr) { free(r->rq_addr, M_SONAME); r->rq_addr = NULL; } m_freem(args); goto call_done; default: m_freem(args); goto call_done; } } r->rq_xid = msg.rm_xid; r->rq_prog = msg.rm_call.cb_prog; r->rq_vers = msg.rm_call.cb_vers; r->rq_proc = msg.rm_call.cb_proc; r->rq_size = sizeof(*r) + m_length(args, NULL); r->rq_args = args; if ((why = _authenticate(r, &msg)) != AUTH_OK) { /* * RPCSEC_GSS uses this return code * for requests that form part of its * context establishment protocol and * should not be dispatched to the * application. */ if (why != RPCSEC_GSS_NODISPATCH) svcerr_auth(r, why); goto call_done; } if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) { svcerr_decode(r); goto call_done; } /* * Everything checks out, return request to caller. */ *rqstp_ret = r; r = NULL; } call_done: if (r) { svc_freereq(r); r = NULL; } if ((stat = SVC_STAT(xprt)) == XPRT_DIED) { TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) (*s->slc_dispatch)(xprt); xprt_unregister(xprt); } return (stat); } static void svc_executereq(struct svc_req *rqstp) { SVCXPRT *xprt = rqstp->rq_xprt; SVCPOOL *pool = xprt->xp_pool; int prog_found; rpcvers_t low_vers; rpcvers_t high_vers; struct svc_callout *s; /* now match message with a registered service */ prog_found = FALSE; low_vers = (rpcvers_t) -1L; high_vers = (rpcvers_t) 0L; TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) { if (s->sc_prog == rqstp->rq_prog) { if (s->sc_vers == rqstp->rq_vers) { /* * We hand ownership of r to the * dispatch method - they must call * svc_freereq. */ (*s->sc_dispatch)(rqstp, xprt); return; } /* found correct version */ prog_found = TRUE; if (s->sc_vers < low_vers) low_vers = s->sc_vers; if (s->sc_vers > high_vers) high_vers = s->sc_vers; } /* found correct program */ } /* * if we got here, the program or version * is not served ... */ if (prog_found) svcerr_progvers(rqstp, low_vers, high_vers); else svcerr_noprog(rqstp); svc_freereq(rqstp); } static void svc_checkidle(SVCGROUP *grp) { SVCXPRT *xprt, *nxprt; time_t timo; struct svcxprt_list cleanup; TAILQ_INIT(&cleanup); TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) { /* * Only some transports have idle timers. Don't time * something out which is just waking up.
*/ if (!xprt->xp_idletimeout || xprt->xp_thread) continue; timo = xprt->xp_lastactive + xprt->xp_idletimeout; if (time_uptime > timo) { xprt_unregister_locked(xprt); TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link); } } mtx_unlock(&grp->sg_lock); TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) { SVC_RELEASE(xprt); } mtx_lock(&grp->sg_lock); } static void svc_assign_waiting_sockets(SVCPOOL *pool) { SVCGROUP *grp; SVCXPRT *xprt; int g; for (g = 0; g < pool->sp_groupcount; g++) { grp = &pool->sp_groups[g]; mtx_lock(&grp->sg_lock); while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) { if (xprt_assignthread(xprt)) TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink); else break; } mtx_unlock(&grp->sg_lock); } } static void -svc_change_space_used(SVCPOOL *pool, int delta) +svc_change_space_used(SVCPOOL *pool, long delta) { - unsigned int value; + unsigned long value; - value = atomic_fetchadd_int(&pool->sp_space_used, delta) + delta; + value = atomic_fetchadd_long(&pool->sp_space_used, delta) + delta; if (delta > 0) { if (value >= pool->sp_space_high && !pool->sp_space_throttled) { pool->sp_space_throttled = TRUE; pool->sp_space_throttle_count++; } if (value > pool->sp_space_used_highest) pool->sp_space_used_highest = value; } else { if (value < pool->sp_space_low && pool->sp_space_throttled) { pool->sp_space_throttled = FALSE; svc_assign_waiting_sockets(pool); } } } static bool_t svc_request_space_available(SVCPOOL *pool) { if (pool->sp_space_throttled) return (FALSE); return (TRUE); } static void svc_run_internal(SVCGROUP *grp, bool_t ismaster) { SVCPOOL *pool = grp->sg_pool; SVCTHREAD *st, *stpref; SVCXPRT *xprt; enum xprt_stat stat; struct svc_req *rqstp; struct proc *p; - size_t sz; + long sz; int error; st = mem_alloc(sizeof(*st)); mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF); st->st_pool = pool; st->st_xprt = NULL; STAILQ_INIT(&st->st_reqs); cv_init(&st->st_cond, "rpcsvc"); mtx_lock(&grp->sg_lock); /* * If we are a new thread which was spawned to cope with * increased load, set the state back to SVCPOOL_ACTIVE. */ if (grp->sg_state == SVCPOOL_THREADSTARTING) grp->sg_state = SVCPOOL_ACTIVE; while (grp->sg_state != SVCPOOL_CLOSING) { /* * Create new thread if requested. */ if (grp->sg_state == SVCPOOL_THREADWANTED) { grp->sg_state = SVCPOOL_THREADSTARTING; grp->sg_lastcreatetime = time_uptime; mtx_unlock(&grp->sg_lock); svc_new_thread(grp); mtx_lock(&grp->sg_lock); continue; } /* * Check for idle transports once per second. */ if (time_uptime > grp->sg_lastidlecheck) { grp->sg_lastidlecheck = time_uptime; svc_checkidle(grp); } xprt = st->st_xprt; if (!xprt) { /* * Enforce maxthreads count. */ if (grp->sg_threadcount > grp->sg_maxthreads) break; /* * Before sleeping, see if we can find an * active transport which isn't being serviced * by a thread. */ if (svc_request_space_available(pool) && (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) { TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink); SVC_ACQUIRE(xprt); xprt->xp_thread = st; st->st_xprt = xprt; continue; } LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink); if (ismaster || (!ismaster && grp->sg_threadcount > grp->sg_minthreads)) error = cv_timedwait_sig(&st->st_cond, &grp->sg_lock, 5 * hz); else error = cv_wait_sig(&st->st_cond, &grp->sg_lock); if (st->st_xprt == NULL) LIST_REMOVE(st, st_ilink); /* * Reduce worker thread count when idle. 
*/ if (error == EWOULDBLOCK) { if (!ismaster && (grp->sg_threadcount > grp->sg_minthreads) && !st->st_xprt) break; } else if (error != 0) { KASSERT(error == EINTR || error == ERESTART, ("non-signal error %d", error)); mtx_unlock(&grp->sg_lock); p = curproc; PROC_LOCK(p); if (P_SHOULDSTOP(p) || (p->p_flag & P_TOTAL_STOP) != 0) { thread_suspend_check(0); PROC_UNLOCK(p); mtx_lock(&grp->sg_lock); } else { PROC_UNLOCK(p); svc_exit(pool); mtx_lock(&grp->sg_lock); break; } } continue; } mtx_unlock(&grp->sg_lock); /* * Drain the transport socket and queue up any RPCs. */ xprt->xp_lastactive = time_uptime; do { if (!svc_request_space_available(pool)) break; rqstp = NULL; stat = svc_getreq(xprt, &rqstp); if (rqstp) { svc_change_space_used(pool, rqstp->rq_size); /* * See if the application has a preference * for some other thread. */ if (pool->sp_assign) { stpref = pool->sp_assign(st, rqstp); rqstp->rq_thread = stpref; STAILQ_INSERT_TAIL(&stpref->st_reqs, rqstp, rq_link); mtx_unlock(&stpref->st_lock); if (stpref != st) rqstp = NULL; } else { rqstp->rq_thread = st; STAILQ_INSERT_TAIL(&st->st_reqs, rqstp, rq_link); } } } while (rqstp == NULL && stat == XPRT_MOREREQS && grp->sg_state != SVCPOOL_CLOSING); /* * Move this transport to the end of the active list to * ensure fairness when multiple transports are active. * If this was the last queued request, svc_getreq will end * up calling xprt_inactive to remove from the active list. */ mtx_lock(&grp->sg_lock); xprt->xp_thread = NULL; st->st_xprt = NULL; if (xprt->xp_active) { if (!svc_request_space_available(pool) || !xprt_assignthread(xprt)) TAILQ_INSERT_TAIL(&grp->sg_active, xprt, xp_alink); } mtx_unlock(&grp->sg_lock); SVC_RELEASE(xprt); /* * Execute what we have queued. */ - sz = 0; mtx_lock(&st->st_lock); while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) { STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link); mtx_unlock(&st->st_lock); - sz += rqstp->rq_size; + sz = (long)rqstp->rq_size; svc_executereq(rqstp); + svc_change_space_used(pool, -sz); mtx_lock(&st->st_lock); } mtx_unlock(&st->st_lock); - svc_change_space_used(pool, -sz); mtx_lock(&grp->sg_lock); } if (st->st_xprt) { xprt = st->st_xprt; st->st_xprt = NULL; SVC_RELEASE(xprt); } KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit")); mtx_destroy(&st->st_lock); cv_destroy(&st->st_cond); mem_free(st, sizeof(*st)); grp->sg_threadcount--; if (!ismaster) wakeup(grp); mtx_unlock(&grp->sg_lock); } static void svc_thread_start(void *arg) { svc_run_internal((SVCGROUP *) arg, FALSE); kthread_exit(); } static void svc_new_thread(SVCGROUP *grp) { SVCPOOL *pool = grp->sg_pool; struct thread *td; grp->sg_threadcount++; kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0, "%s: service", pool->sp_name); } void svc_run(SVCPOOL *pool) { int g, i; struct proc *p; struct thread *td; SVCGROUP *grp; p = curproc; td = curthread; snprintf(td->td_name, sizeof(td->td_name), "%s: master", pool->sp_name); pool->sp_state = SVCPOOL_ACTIVE; pool->sp_proc = p; /* Choose group count based on number of threads and CPUs. */ pool->sp_groupcount = max(1, min(SVC_MAXGROUPS, min(pool->sp_maxthreads / 2, mp_ncpus) / 6)); for (g = 0; g < pool->sp_groupcount; g++) { grp = &pool->sp_groups[g]; grp->sg_minthreads = max(1, pool->sp_minthreads / pool->sp_groupcount); grp->sg_maxthreads = max(1, pool->sp_maxthreads / pool->sp_groupcount); grp->sg_lastcreatetime = time_uptime; } /* Starting threads */ for (g = 0; g < pool->sp_groupcount; g++) { grp = &pool->sp_groups[g]; for (i = ((g == 0) ? 
1 : 0); i < grp->sg_minthreads; i++) svc_new_thread(grp); } pool->sp_groups[0].sg_threadcount++; svc_run_internal(&pool->sp_groups[0], TRUE); /* Waiting for threads to stop. */ for (g = 0; g < pool->sp_groupcount; g++) { grp = &pool->sp_groups[g]; mtx_lock(&grp->sg_lock); while (grp->sg_threadcount > 0) msleep(grp, &grp->sg_lock, 0, "svcexit", 0); mtx_unlock(&grp->sg_lock); } } void svc_exit(SVCPOOL *pool) { SVCGROUP *grp; SVCTHREAD *st; int g; pool->sp_state = SVCPOOL_CLOSING; for (g = 0; g < pool->sp_groupcount; g++) { grp = &pool->sp_groups[g]; mtx_lock(&grp->sg_lock); if (grp->sg_state != SVCPOOL_CLOSING) { grp->sg_state = SVCPOOL_CLOSING; LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink) cv_signal(&st->st_cond); } mtx_unlock(&grp->sg_lock); } } bool_t svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args) { struct mbuf *m; XDR xdrs; bool_t stat; m = rqstp->rq_args; rqstp->rq_args = NULL; xdrmbuf_create(&xdrs, m, XDR_DECODE); stat = xargs(&xdrs, args); XDR_DESTROY(&xdrs); return (stat); } bool_t svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args) { XDR xdrs; if (rqstp->rq_addr) { free(rqstp->rq_addr, M_SONAME); rqstp->rq_addr = NULL; } xdrs.x_op = XDR_FREE; return (xargs(&xdrs, args)); } void svc_freereq(struct svc_req *rqstp) { SVCTHREAD *st; SVCPOOL *pool; st = rqstp->rq_thread; if (st) { pool = st->st_pool; if (pool->sp_done) pool->sp_done(st, rqstp); } if (rqstp->rq_auth.svc_ah_ops) SVCAUTH_RELEASE(&rqstp->rq_auth); if (rqstp->rq_xprt) { SVC_RELEASE(rqstp->rq_xprt); } if (rqstp->rq_addr) free(rqstp->rq_addr, M_SONAME); if (rqstp->rq_args) m_freem(rqstp->rq_args); free(rqstp, M_RPC); } Index: head/sys/rpc/svc.h =================================================================== --- head/sys/rpc/svc.h (revision 280929) +++ head/sys/rpc/svc.h (revision 280930) @@ -1,903 +1,903 @@ /* $NetBSD: svc.h,v 1.17 2000/06/02 22:57:56 fvdl Exp $ */ /*- * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * from: @(#)svc.h 1.35 88/12/17 SMI * from: @(#)svc.h 1.27 94/04/25 SMI * $FreeBSD$ */ /* * svc.h, Server-side remote procedure call interface. * * Copyright (C) 1986-1993 by Sun Microsystems, Inc. */ #ifndef _RPC_SVC_H #define _RPC_SVC_H #include <sys/cdefs.h> #ifdef _KERNEL #include <sys/queue.h> #include <sys/_lock.h> #include <sys/_mutex.h> #include <sys/_sx.h> #include <sys/condvar.h> #include <sys/sysctl.h> #endif /* * This interface must manage two items concerning remote procedure calling: * * 1) An arbitrary number of transport connections upon which rpc requests * are received. The two most notable transports are TCP and UDP; they are * created and registered by routines in svc_tcp.c and svc_udp.c, respectively; * they in turn call xprt_register and xprt_unregister. * * 2) An arbitrary number of locally registered services. Services are * described by the following five data: program number, version number, * "service dispatch" function, a transport handle, and a boolean that * indicates whether or not the exported program should be registered with a * local binder service; if true the program's number and version and the * port number from the transport handle are registered with the binder. * These data are registered with the rpc svc system via svc_register. * * A service's dispatch function is called whenever an rpc request comes in * on a transport. The request's program and version numbers must match * those of the registered service. The dispatch function is passed two * parameters, struct svc_req * and SVCXPRT *, defined below. */ /* * Service control requests */ #define SVCGET_VERSQUIET 1 #define SVCSET_VERSQUIET 2 #define SVCGET_CONNMAXREC 3 #define SVCSET_CONNMAXREC 4 /* * Operations for rpc_control(). */ #define RPC_SVC_CONNMAXREC_SET 0 /* set max rec size, enable nonblock */ #define RPC_SVC_CONNMAXREC_GET 1 enum xprt_stat { XPRT_DIED, XPRT_MOREREQS, XPRT_IDLE }; struct __rpc_svcxprt; struct mbuf; struct xp_ops { #ifdef _KERNEL /* receive incoming requests */ bool_t (*xp_recv)(struct __rpc_svcxprt *, struct rpc_msg *, struct sockaddr **, struct mbuf **); /* get transport status */ enum xprt_stat (*xp_stat)(struct __rpc_svcxprt *); /* get transport acknowledge sequence */ bool_t (*xp_ack)(struct __rpc_svcxprt *, uint32_t *); /* send reply */ bool_t (*xp_reply)(struct __rpc_svcxprt *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *); /* destroy this struct */ void (*xp_destroy)(struct __rpc_svcxprt *); /* catch-all function */ bool_t (*xp_control)(struct __rpc_svcxprt *, const u_int, void *); #else /* receive incoming requests */ bool_t (*xp_recv)(struct __rpc_svcxprt *, struct rpc_msg *); /* get transport status */ enum xprt_stat (*xp_stat)(struct __rpc_svcxprt *); /* get arguments */ bool_t (*xp_getargs)(struct __rpc_svcxprt *, xdrproc_t, void *); /* send reply */ bool_t (*xp_reply)(struct __rpc_svcxprt *, struct rpc_msg *); /* free mem allocated for args */ bool_t (*xp_freeargs)(struct __rpc_svcxprt *, xdrproc_t, void *); /* destroy this struct */ void (*xp_destroy)(struct __rpc_svcxprt *); #endif }; #ifndef _KERNEL struct xp_ops2 { /* catch-all function */ bool_t (*xp_control)(struct __rpc_svcxprt *, const u_int, void *); }; #endif #ifdef _KERNEL struct __rpc_svcpool; struct __rpc_svcgroup; struct __rpc_svcthread; #endif
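/*
 * Editorial sketch (not part of this change): the lifetime rule for the
 * kernel transport handle defined below.  SVC_ACQUIRE()/SVC_RELEASE()
 * (see the macros later in this header) bracket any use of an SVCXPRT
 * outside the owning group's lock; the final release runs SVC_DESTROY().
 * example_use is a hypothetical helper.
 */
#if 0	/* illustration only */
static void
example_use(SVCXPRT *xprt)
{

	SVC_ACQUIRE(xprt);		/* take a reference */
	/* ... read requests, send replies ... */
	SVC_RELEASE(xprt);		/* drop it; destroys xprt if last */
}
#endif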
/* * Server side transport handle. In the kernel, transports have a * reference count which tracks the number of currently assigned * worker threads plus one for the service pool's reference. * For NFSv4.1 sessions, a reference is also held for a backchannel. */ typedef struct __rpc_svcxprt { #ifdef _KERNEL volatile u_int xp_refs; struct sx xp_lock; struct __rpc_svcpool *xp_pool; /* owning pool (see below) */ struct __rpc_svcgroup *xp_group; /* owning group (see below) */ TAILQ_ENTRY(__rpc_svcxprt) xp_link; TAILQ_ENTRY(__rpc_svcxprt) xp_alink; bool_t xp_registered; /* xprt_register has been called */ bool_t xp_active; /* xprt_active has been called */ struct __rpc_svcthread *xp_thread; /* assigned service thread */ struct socket* xp_socket; const struct xp_ops *xp_ops; char *xp_netid; /* network token */ struct sockaddr_storage xp_ltaddr; /* local transport address */ struct sockaddr_storage xp_rtaddr; /* remote transport address */ void *xp_p1; /* private: for use by svc ops */ void *xp_p2; /* private: for use by svc ops */ void *xp_p3; /* private: for use by svc lib */ int xp_type; /* transport type */ int xp_idletimeout; /* idle time before closing */ time_t xp_lastactive; /* time of last RPC */ u_int64_t xp_sockref; /* set by nfsv4 to identify socket */ int xp_upcallset; /* socket upcall is set up */ uint32_t xp_snd_cnt; /* # of bytes to send to socket */ uint32_t xp_snt_cnt; /* # of bytes sent to socket */ #else int xp_fd; u_short xp_port; /* associated port number */ const struct xp_ops *xp_ops; int xp_addrlen; /* length of remote address */ struct sockaddr_in xp_raddr; /* remote addr. (backward ABI compat) */ /* XXX - fvdl stick this here for ABI backward compat reasons */ const struct xp_ops2 *xp_ops2; char *xp_tp; /* transport provider device name */ char *xp_netid; /* network token */ struct netbuf xp_ltaddr; /* local transport address */ struct netbuf xp_rtaddr; /* remote transport address */ struct opaque_auth xp_verf; /* raw response verifier */ void *xp_p1; /* private: for use by svc ops */ void *xp_p2; /* private: for use by svc ops */ void *xp_p3; /* private: for use by svc lib */ int xp_type; /* transport type */ #endif } SVCXPRT; /* * Interface to server-side authentication flavors. */ typedef struct __rpc_svcauth { struct svc_auth_ops { #ifdef _KERNEL int (*svc_ah_wrap)(struct __rpc_svcauth *, struct mbuf **); int (*svc_ah_unwrap)(struct __rpc_svcauth *, struct mbuf **); void (*svc_ah_release)(struct __rpc_svcauth *); #else int (*svc_ah_wrap)(struct __rpc_svcauth *, XDR *, xdrproc_t, caddr_t); int (*svc_ah_unwrap)(struct __rpc_svcauth *, XDR *, xdrproc_t, caddr_t); #endif } *svc_ah_ops; void *svc_ah_private; } SVCAUTH; /* * Server transport extensions (accessed via xp_p3). */ typedef struct __rpc_svcxprt_ext { int xp_flags; /* versquiet */ SVCAUTH xp_auth; /* interface to auth methods */ } SVCXPRT_EXT; #ifdef _KERNEL /* * The services list * Each entry represents a set of procedures (an rpc program). * The dispatch routine takes request structs and runs the * appropriate procedure. */ struct svc_callout { TAILQ_ENTRY(svc_callout) sc_link; rpcprog_t sc_prog; rpcvers_t sc_vers; char *sc_netid; void (*sc_dispatch)(struct svc_req *, SVCXPRT *); }; TAILQ_HEAD(svc_callout_list, svc_callout);
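/*
 * Editorial sketch (not part of this change): how svc_executereq() in
 * svc.c consults this list.  An exact (prog, vers) match dispatches the
 * request; a program-only match only widens the version range reported
 * by svcerr_progvers().  example_find_exact is a hypothetical helper.
 */
#if 0	/* illustration only */
static struct svc_callout *
example_find_exact(struct svc_callout_list *scl, rpcprog_t prog,
    rpcvers_t vers)
{
	struct svc_callout *s;

	TAILQ_FOREACH(s, scl, sc_link) {
		if (s->sc_prog == prog && s->sc_vers == vers)
			return (s);
	}
	return (NULL);
}
#endif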
/* * The services connection loss list * The dispatch routine takes request structs and runs the * appropriate procedure. */ struct svc_loss_callout { TAILQ_ENTRY(svc_loss_callout) slc_link; void (*slc_dispatch)(SVCXPRT *); }; TAILQ_HEAD(svc_loss_callout_list, svc_loss_callout); /* * Service request */ struct svc_req { STAILQ_ENTRY(svc_req) rq_link; /* list of requests for a thread */ struct __rpc_svcthread *rq_thread; /* thread which is to execute this */ uint32_t rq_xid; /* RPC transaction ID */ uint32_t rq_prog; /* service program number */ uint32_t rq_vers; /* service protocol version */ uint32_t rq_proc; /* the desired procedure */ size_t rq_size; /* space used by request */ struct mbuf *rq_args; /* XDR-encoded procedure arguments */ struct opaque_auth rq_cred; /* raw creds from the wire */ struct opaque_auth rq_verf; /* verifier for the reply */ void *rq_clntcred; /* read only cooked cred */ SVCAUTH rq_auth; /* interface to auth methods */ SVCXPRT *rq_xprt; /* associated transport */ struct sockaddr *rq_addr; /* reply address or NULL if connected */ void *rq_p1; /* application workspace */ int rq_p2; /* application workspace */ uint64_t rq_p3; /* application workspace */ uint32_t rq_reply_seq; /* reply socket sequence # */ char rq_credarea[3*MAX_AUTH_BYTES]; }; STAILQ_HEAD(svc_reqlist, svc_req); #define svc_getrpccaller(rq) \ ((rq)->rq_addr ? (rq)->rq_addr : \ (struct sockaddr *) &(rq)->rq_xprt->xp_rtaddr) /* * This structure is used to manage a thread which is executing * requests from a service pool. A service thread is in one of three * states: * * SVCTHREAD_SLEEPING waiting for a request to process * SVCTHREAD_ACTIVE processing a request * SVCTHREAD_EXITING exiting after finishing current request * * Threads which have no work to process sleep on the group's * sg_idlethreads list. When a transport becomes active, it is assigned * a service thread to read and execute pending RPCs. */ typedef struct __rpc_svcthread { struct mtx_padalign st_lock; /* protects st_reqs field */ struct __rpc_svcpool *st_pool; SVCXPRT *st_xprt; /* transport we are processing */ struct svc_reqlist st_reqs; /* RPC requests to execute */ struct cv st_cond; /* sleeping for work */ LIST_ENTRY(__rpc_svcthread) st_ilink; /* idle threads list */ LIST_ENTRY(__rpc_svcthread) st_alink; /* application thread list */ int st_p2; /* application workspace */ uint64_t st_p3; /* application workspace */ } SVCTHREAD; LIST_HEAD(svcthread_list, __rpc_svcthread); /* * A thread group contains all information needed to assign a subset of * transports to a subset of threads. On systems with many CPUs and many * threads this reduces lock congestion and improves performance. * Hundreds of threads on dozens of CPUs sharing a single pool lock do * not scale well otherwise. */
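/*
 * Editorial sketch (not part of this change): xprt_register() in svc.c
 * spreads transports round-robin over the groups declared below, so each
 * sg_lock covers only a fraction of the transports.  example_pick_group
 * is a hypothetical helper.
 */
#if 0	/* illustration only */
static SVCGROUP *
example_pick_group(SVCPOOL *pool)
{
	int g;

	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) %
	    pool->sp_groupcount;
	return (&pool->sp_groups[g]);
}
#endif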
TAILQ_HEAD(svcxprt_list, __rpc_svcxprt); enum svcpool_state { SVCPOOL_INIT, /* svc_run not called yet */ SVCPOOL_ACTIVE, /* normal running state */ SVCPOOL_THREADWANTED, /* new service thread requested */ SVCPOOL_THREADSTARTING, /* new service thread started */ SVCPOOL_CLOSING /* svc_exit called */ }; typedef struct __rpc_svcgroup { struct mtx_padalign sg_lock; /* protect the thread/req lists */ struct __rpc_svcpool *sg_pool; enum svcpool_state sg_state; /* current pool state */ struct svcxprt_list sg_xlist; /* all transports in the group */ struct svcxprt_list sg_active; /* transports needing service */ struct svcthread_list sg_idlethreads; /* idle service threads */ int sg_minthreads; /* minimum service thread count */ int sg_maxthreads; /* maximum service thread count */ int sg_threadcount; /* current service thread count */ time_t sg_lastcreatetime; /* when we last started a thread */ time_t sg_lastidlecheck; /* when we last checked idle transports */ } SVCGROUP; /* * In the kernel, we can't use global variables to store lists of * transports etc. since otherwise we could not have two unrelated RPC * services running, each on its own thread. We solve this by * importing a tiny part of a Solaris kernel concept, SVCPOOL. * * A service pool contains a set of transports and service callbacks * for a set of related RPC services. The pool handle should be passed * when creating new transports etc. Future work may include extending * this to support something similar to the Solaris multi-threaded RPC * server. */ typedef SVCTHREAD *pool_assign_fn(SVCTHREAD *, struct svc_req *); typedef void pool_done_fn(SVCTHREAD *, struct svc_req *); #define SVC_MAXGROUPS 16 typedef struct __rpc_svcpool { struct mtx_padalign sp_lock; /* protect the transport lists */ const char *sp_name; /* pool name (e.g. "nfsd", "NLM") */ enum svcpool_state sp_state; /* current pool state */ struct proc *sp_proc; /* process which is in svc_run */ struct svc_callout_list sp_callouts; /* (prog,vers)->dispatch list */ struct svc_loss_callout_list sp_lcallouts; /* loss->dispatch list */ int sp_minthreads; /* minimum service thread count */ int sp_maxthreads; /* maximum service thread count */ /* * Hooks to allow an application to control request to thread * placement. */ pool_assign_fn *sp_assign; pool_done_fn *sp_done; /* * These variables are used to put an upper bound on the * amount of memory used by RPC requests which are queued * waiting for execution. */ - unsigned int sp_space_low; - unsigned int sp_space_high; - unsigned int sp_space_used; - unsigned int sp_space_used_highest; + unsigned long sp_space_low; + unsigned long sp_space_high; + unsigned long sp_space_used; + unsigned long sp_space_used_highest; bool_t sp_space_throttled; int sp_space_throttle_count; struct replay_cache *sp_rcache; /* optional replay cache */ struct sysctl_ctx_list sp_sysctl; int sp_groupcount; /* Number of groups in the pool. */ int sp_nextgroup; /* Next group to assign port. */ SVCGROUP sp_groups[SVC_MAXGROUPS]; /* Thread/port groups.
*/ } SVCPOOL; #else /* * Service request */ struct svc_req { uint32_t rq_prog; /* service program number */ uint32_t rq_vers; /* service protocol version */ uint32_t rq_proc; /* the desired procedure */ struct opaque_auth rq_cred; /* raw creds from the wire */ void *rq_clntcred; /* read only cooked cred */ SVCXPRT *rq_xprt; /* associated transport */ }; /* * Approved way of getting address of caller */ #define svc_getrpccaller(x) (&(x)->xp_rtaddr) #endif /* * Operations defined on an SVCXPRT handle * * SVCXPRT *xprt; * struct rpc_msg *msg; * xdrproc_t xargs; * void * argsp; */ #ifdef _KERNEL #define SVC_ACQUIRE(xprt) \ refcount_acquire(&(xprt)->xp_refs) #define SVC_RELEASE(xprt) \ if (refcount_release(&(xprt)->xp_refs)) \ SVC_DESTROY(xprt) #define SVC_RECV(xprt, msg, addr, args) \ (*(xprt)->xp_ops->xp_recv)((xprt), (msg), (addr), (args)) #define SVC_STAT(xprt) \ (*(xprt)->xp_ops->xp_stat)(xprt) #define SVC_ACK(xprt, ack) \ ((xprt)->xp_ops->xp_ack == NULL ? FALSE : \ ((ack) == NULL ? TRUE : (*(xprt)->xp_ops->xp_ack)((xprt), (ack)))) #define SVC_REPLY(xprt, msg, addr, m, seq) \ (*(xprt)->xp_ops->xp_reply) ((xprt), (msg), (addr), (m), (seq)) #define SVC_DESTROY(xprt) \ (*(xprt)->xp_ops->xp_destroy)(xprt) #define SVC_CONTROL(xprt, rq, in) \ (*(xprt)->xp_ops->xp_control)((xprt), (rq), (in)) #else #define SVC_RECV(xprt, msg) \ (*(xprt)->xp_ops->xp_recv)((xprt), (msg)) #define svc_recv(xprt, msg) \ (*(xprt)->xp_ops->xp_recv)((xprt), (msg)) #define SVC_STAT(xprt) \ (*(xprt)->xp_ops->xp_stat)(xprt) #define svc_stat(xprt) \ (*(xprt)->xp_ops->xp_stat)(xprt) #define SVC_GETARGS(xprt, xargs, argsp) \ (*(xprt)->xp_ops->xp_getargs)((xprt), (xargs), (argsp)) #define svc_getargs(xprt, xargs, argsp) \ (*(xprt)->xp_ops->xp_getargs)((xprt), (xargs), (argsp)) #define SVC_REPLY(xprt, msg) \ (*(xprt)->xp_ops->xp_reply) ((xprt), (msg)) #define svc_reply(xprt, msg) \ (*(xprt)->xp_ops->xp_reply) ((xprt), (msg)) #define SVC_FREEARGS(xprt, xargs, argsp) \ (*(xprt)->xp_ops->xp_freeargs)((xprt), (xargs), (argsp)) #define svc_freeargs(xprt, xargs, argsp) \ (*(xprt)->xp_ops->xp_freeargs)((xprt), (xargs), (argsp)) #define SVC_DESTROY(xprt) \ (*(xprt)->xp_ops->xp_destroy)(xprt) #define svc_destroy(xprt) \ (*(xprt)->xp_ops->xp_destroy)(xprt) #define SVC_CONTROL(xprt, rq, in) \ (*(xprt)->xp_ops2->xp_control)((xprt), (rq), (in)) #endif #define SVC_EXT(xprt) \ ((SVCXPRT_EXT *) xprt->xp_p3) #define SVC_AUTH(xprt) \ (SVC_EXT(xprt)->xp_auth) /* * Operations defined on an SVCAUTH handle */ #ifdef _KERNEL #define SVCAUTH_WRAP(auth, mp) \ ((auth)->svc_ah_ops->svc_ah_wrap(auth, mp)) #define SVCAUTH_UNWRAP(auth, mp) \ ((auth)->svc_ah_ops->svc_ah_unwrap(auth, mp)) #define SVCAUTH_RELEASE(auth) \ ((auth)->svc_ah_ops->svc_ah_release(auth)) #else #define SVCAUTH_WRAP(auth, xdrs, xfunc, xwhere) \ ((auth)->svc_ah_ops->svc_ah_wrap(auth, xdrs, xfunc, xwhere)) #define SVCAUTH_UNWRAP(auth, xdrs, xfunc, xwhere) \ ((auth)->svc_ah_ops->svc_ah_unwrap(auth, xdrs, xfunc, xwhere)) #endif /* * Service registration * * svc_reg(xprt, prog, vers, dispatch, nconf) * const SVCXPRT *xprt; * const rpcprog_t prog; * const rpcvers_t vers; * const void (*dispatch)(); * const struct netconfig *nconf; */ __BEGIN_DECLS extern bool_t svc_reg(SVCXPRT *, const rpcprog_t, const rpcvers_t, void (*)(struct svc_req *, SVCXPRT *), const struct netconfig *); __END_DECLS /* * Service un-registration * * svc_unreg(prog, vers) * const rpcprog_t prog; * const rpcvers_t vers; */ __BEGIN_DECLS #ifdef _KERNEL extern void svc_unreg(SVCPOOL *, const rpcprog_t, const rpcvers_t); 
#else extern void svc_unreg(const rpcprog_t, const rpcvers_t); #endif __END_DECLS #ifdef _KERNEL /* * Service connection loss registration * * svc_loss_reg(xprt, dispatch) * const SVCXPRT *xprt; * const void (*dispatch)(); */ __BEGIN_DECLS extern bool_t svc_loss_reg(SVCXPRT *, void (*)(SVCXPRT *)); __END_DECLS /* * Service connection loss un-registration * * svc_loss_unreg(xprt, dispatch) * const SVCXPRT *xprt; * const void (*dispatch)(); */ __BEGIN_DECLS extern void svc_loss_unreg(SVCPOOL *, void (*)(SVCXPRT *)); __END_DECLS #endif /* * Transport registration. * * xprt_register(xprt) * SVCXPRT *xprt; */ __BEGIN_DECLS extern void xprt_register(SVCXPRT *); __END_DECLS /* * Transport un-register * * xprt_unregister(xprt) * SVCXPRT *xprt; */ __BEGIN_DECLS extern void xprt_unregister(SVCXPRT *); extern void __xprt_unregister_unlocked(SVCXPRT *); __END_DECLS #ifdef _KERNEL /* * Called when a transport has pending requests. */ __BEGIN_DECLS extern void xprt_active(SVCXPRT *); extern void xprt_inactive(SVCXPRT *); extern void xprt_inactive_locked(SVCXPRT *); extern void xprt_inactive_self(SVCXPRT *); __END_DECLS #endif /* * When the service routine is called, it must first check to see if it * knows about the procedure; if not, it should call svcerr_noproc * and return. If so, it should deserialize its arguments via * SVC_GETARGS (defined above). If the deserialization does not work, * svcerr_decode should be called followed by a return. Successful * decoding of the arguments should be followed by the execution of the * procedure's code and a call to svc_sendreply. * * Also, if the service refuses to execute the procedure due to too- * weak authentication parameters, svcerr_weakauth should be called. * Note: do not confuse access-control failure with weak authentication! * * NB: In pure implementations of rpc, the caller always waits for a reply * msg. This message is sent when svc_sendreply is called. * Therefore pure service implementations should always call * svc_sendreply even if the function logically returns void; use * xdr.h - xdr_void for the xdr routine. HOWEVER, tcp based rpc allows * for the abuse of pure rpc via batched calling or pipelining. In the * case of a batched call, svc_sendreply should NOT be called since * this would send a return message, which is what batching tries to avoid. * It is the service/protocol writer's responsibility to know which calls are * batched and which are not. Warning: responding to batch calls may * deadlock the caller and server processes! */
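/*
 * Editorial sketch (not part of this change): a kernel service routine
 * shaped by the rules above.  The procedure number and argument type are
 * hypothetical.
 */
#if 0	/* illustration only */
static void
example_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
{
	int arg, res;

	switch (rqstp->rq_proc) {
	case 1:				/* hypothetical procedure */
		if (!svc_getargs(rqstp, (xdrproc_t)xdr_int, &arg)) {
			svcerr_decode(rqstp);
			break;
		}
		res = arg + 1;
		svc_sendreply(rqstp, (xdrproc_t)xdr_int, &res);
		break;
	default:
		svcerr_noproc(rqstp);
		break;
	}
	svc_freereq(rqstp);		/* dispatch owns the request */
}
#endif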
__BEGIN_DECLS #ifdef _KERNEL extern bool_t svc_sendreply(struct svc_req *, xdrproc_t, void *); extern bool_t svc_sendreply_mbuf(struct svc_req *, struct mbuf *); extern void svcerr_decode(struct svc_req *); extern void svcerr_weakauth(struct svc_req *); extern void svcerr_noproc(struct svc_req *); extern void svcerr_progvers(struct svc_req *, rpcvers_t, rpcvers_t); extern void svcerr_auth(struct svc_req *, enum auth_stat); extern void svcerr_noprog(struct svc_req *); extern void svcerr_systemerr(struct svc_req *); #else extern bool_t svc_sendreply(SVCXPRT *, xdrproc_t, void *); extern void svcerr_decode(SVCXPRT *); extern void svcerr_weakauth(SVCXPRT *); extern void svcerr_noproc(SVCXPRT *); extern void svcerr_progvers(SVCXPRT *, rpcvers_t, rpcvers_t); extern void svcerr_auth(SVCXPRT *, enum auth_stat); extern void svcerr_noprog(SVCXPRT *); extern void svcerr_systemerr(SVCXPRT *); #endif extern int rpc_reg(rpcprog_t, rpcvers_t, rpcproc_t, char *(*)(char *), xdrproc_t, xdrproc_t, char *); __END_DECLS /* * Lowest level dispatching -OR- who owns this process anyway. * Somebody has to wait for incoming requests and then call the correct * service routine. The routine svc_run does infinite waiting; i.e., * svc_run never returns. * Since another (co-existent) package may wish to selectively wait for * incoming calls or other events outside of the rpc architecture, the * routine svc_getreq is provided. It must be passed readfds, the * "in-place" results of a select system call (see select, section 2). */ #ifndef _KERNEL /* * Global keeper of rpc service descriptors in use * dynamic; must be inspected before each call to select */ extern int svc_maxfd; #ifdef FD_SETSIZE extern fd_set svc_fdset; #define svc_fds svc_fdset.fds_bits[0] /* compatibility */ #else extern int svc_fds; #endif /* def FD_SETSIZE */ #endif /* * a small program implemented by the svc_rpc implementation itself; * also see clnt.h for protocol numbers. */ __BEGIN_DECLS extern void rpctest_service(void); __END_DECLS __BEGIN_DECLS extern SVCXPRT *svc_xprt_alloc(void); extern void svc_xprt_free(SVCXPRT *); #ifndef _KERNEL extern void svc_getreq(int); extern void svc_getreqset(fd_set *); extern void svc_getreq_common(int); struct pollfd; extern void svc_getreq_poll(struct pollfd *, int); extern void svc_run(void); extern void svc_exit(void); #else extern void svc_run(SVCPOOL *); extern void svc_exit(SVCPOOL *); extern bool_t svc_getargs(struct svc_req *, xdrproc_t, void *); extern bool_t svc_freeargs(struct svc_req *, xdrproc_t, void *); extern void svc_freereq(struct svc_req *); #endif __END_DECLS /* * Socket to use on svcxxx_create call to get default socket */ #define RPC_ANYSOCK -1 #define RPC_ANYFD RPC_ANYSOCK /* * These are the existing service side transport implementations */ __BEGIN_DECLS #ifdef _KERNEL /* * Create a new service pool. */ extern SVCPOOL* svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base); /* * Destroy a service pool, including all registered transports. */ extern void svcpool_destroy(SVCPOOL *pool); /* * Transport independent svc_create routine. */ extern int svc_create(SVCPOOL *, void (*)(struct svc_req *, SVCXPRT *), const rpcprog_t, const rpcvers_t, const char *); /* * void (*dispatch)(); -- dispatch routine * const rpcprog_t prognum; -- program number * const rpcvers_t versnum; -- version number * const char *nettype; -- network type */ /* * Generic server creation routine. It takes a netconfig structure * instead of a nettype.
*/ extern SVCXPRT *svc_tp_create(SVCPOOL *, void (*)(struct svc_req *, SVCXPRT *), const rpcprog_t, const rpcvers_t, const char *uaddr, const struct netconfig *); /* * void (*dispatch)(); -- dispatch routine * const rpcprog_t prognum; -- program number * const rpcvers_t versnum; -- version number * const char *uaddr; -- universal address of service * const struct netconfig *nconf; -- netconfig structure */ extern SVCXPRT *svc_dg_create(SVCPOOL *, struct socket *, const size_t, const size_t); /* * struct socket *; -- open connection * const size_t sendsize; -- max send size * const size_t recvsize; -- max recv size */ extern SVCXPRT *svc_vc_create(SVCPOOL *, struct socket *, const size_t, const size_t); /* * struct socket *; -- open connection * const size_t sendsize; -- max send size * const size_t recvsize; -- max recv size */ extern SVCXPRT *svc_vc_create_backchannel(SVCPOOL *); extern void *clnt_bck_create(struct socket *, const rpcprog_t, const rpcvers_t); /* * struct socket *; -- server transport socket * const rpcprog_t prog; -- RPC program number * const rpcvers_t vers; -- RPC program version */ /* * Generic TLI create routine */ extern SVCXPRT *svc_tli_create(SVCPOOL *, struct socket *, const struct netconfig *, const struct t_bind *, const size_t, const size_t); /* * struct socket * so; -- connection end point * const struct netconfig *nconf; -- netconfig structure for network * const struct t_bind *bindaddr; -- local bind address * const size_t sendsz; -- max sendsize * const size_t recvsz; -- max recvsize */ #else /* !_KERNEL */ /* * Transport independent svc_create routine. */ extern int svc_create(void (*)(struct svc_req *, SVCXPRT *), const rpcprog_t, const rpcvers_t, const char *); /* * void (*dispatch)(); -- dispatch routine * const rpcprog_t prognum; -- program number * const rpcvers_t versnum; -- version number * const char *nettype; -- network type */ /* * Generic server creation routine. It takes a netconfig structure * instead of a nettype. */ extern SVCXPRT *svc_tp_create(void (*)(struct svc_req *, SVCXPRT *), const rpcprog_t, const rpcvers_t, const struct netconfig *); /* * void (*dispatch)(); -- dispatch routine * const rpcprog_t prognum; -- program number * const rpcvers_t versnum; -- version number * const struct netconfig *nconf; -- netconfig structure */ /* * Generic TLI create routine */ extern SVCXPRT *svc_tli_create(const int, const struct netconfig *, const struct t_bind *, const u_int, const u_int); /* * const int fd; -- connection end point * const struct netconfig *nconf; -- netconfig structure for network * const struct t_bind *bindaddr; -- local bind address * const u_int sendsz; -- max sendsize * const u_int recvsz; -- max recvsize */ /* * Connectionless and connectionful create routines */ extern SVCXPRT *svc_vc_create(const int, const u_int, const u_int); /* * const int fd; -- open connection end point * const u_int sendsize; -- max send size * const u_int recvsize; -- max recv size */ /* * Added for compatibility to old rpc 4.0. Obsoleted by svc_vc_create(). */ extern SVCXPRT *svcunix_create(int, u_int, u_int, char *); extern SVCXPRT *svc_dg_create(const int, const u_int, const u_int); /* * const int fd; -- open connection * const u_int sendsize; -- max send size * const u_int recvsize; -- max recv size */ /* * the routine takes any *open* connection * descriptor as its first input and is used for open connections. 
*/ extern SVCXPRT *svc_fd_create(const int, const u_int, const u_int); /* * const int fd; -- open connection end point * const u_int sendsize; -- max send size * const u_int recvsize; -- max recv size */ /* * Added for compatibility to old rpc 4.0. Obsoleted by svc_fd_create(). */ extern SVCXPRT *svcunixfd_create(int, u_int, u_int); /* * Memory based rpc (for speed check and testing) */ extern SVCXPRT *svc_raw_create(void); /* * svc_dg_enablecache() enables the cache on dg transports. */ int svc_dg_enablecache(SVCXPRT *, const u_int); int __rpc_get_local_uid(SVCXPRT *_transp, uid_t *_uid); #endif /* !_KERNEL */ __END_DECLS #ifndef _KERNEL /* for backward compatibility */ #include <rpc/svc_soc.h> #endif #endif /* !_RPC_SVC_H */
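/*
 * Editorial sketch (not part of this change): the overall lifecycle of a
 * kernel service pool built from the interfaces above.  Error handling
 * is omitted; EXPROG, EXVERS, example_dispatch and the "netpath" nettype
 * choice are hypothetical.
 */
#if 0	/* illustration only */
static void
example_service(void)
{
	SVCPOOL *pool;

	pool = svcpool_create("example", NULL);	/* no sysctl tree */
	pool->sp_minthreads = 4;
	pool->sp_maxthreads = 64;
	svc_create(pool, example_dispatch, EXPROG, EXVERS, "netpath");
	svc_run(pool);			/* blocks until svc_exit(pool) */
	svcpool_destroy(pool);
}
#endif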