diff --git a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c index d87a57ad514b..f375a184d1cc 100644 --- a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c +++ b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c @@ -1,1579 +1,1584 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 1990 The Regents of the University of California. * * Copyright (c) 2008 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* svc_rpcsec_gss.c Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
$Id: svc_auth_gss.c,v 1.27 2002/01/15 15:43:00 andros Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rpcsec_gss_int.h" static bool_t svc_rpc_gss_wrap(SVCAUTH *, struct mbuf **); static bool_t svc_rpc_gss_unwrap(SVCAUTH *, struct mbuf **); static void svc_rpc_gss_release(SVCAUTH *); static enum auth_stat svc_rpc_gss(struct svc_req *, struct rpc_msg *); static int rpc_gss_svc_getcred(struct svc_req *, struct ucred **, int *); static const struct svc_auth_ops svc_auth_gss_ops = { .svc_ah_wrap = svc_rpc_gss_wrap, .svc_ah_unwrap = svc_rpc_gss_unwrap, .svc_ah_release = svc_rpc_gss_release, }; struct sx svc_rpc_gss_lock; struct svc_rpc_gss_callback { SLIST_ENTRY(svc_rpc_gss_callback) cb_link; rpc_gss_callback_t cb_callback; }; static SLIST_HEAD(svc_rpc_gss_callback_list, svc_rpc_gss_callback) svc_rpc_gss_callbacks = SLIST_HEAD_INITIALIZER(svc_rpc_gss_callbacks); struct svc_rpc_gss_svc_name { SLIST_ENTRY(svc_rpc_gss_svc_name) sn_link; char *sn_principal; gss_OID sn_mech; u_int sn_req_time; gss_cred_id_t sn_cred; u_int sn_program; u_int sn_version; }; static SLIST_HEAD(svc_rpc_gss_svc_name_list, svc_rpc_gss_svc_name) svc_rpc_gss_svc_names = SLIST_HEAD_INITIALIZER(svc_rpc_gss_svc_names); enum svc_rpc_gss_client_state { CLIENT_NEW, /* still authenticating */ CLIENT_ESTABLISHED, /* context established */ CLIENT_STALE /* garbage to collect */ }; #define SVC_RPC_GSS_SEQWINDOW 128 struct svc_rpc_gss_clientid { unsigned long ci_hostid; uint32_t ci_boottime; uint32_t ci_id; }; struct svc_rpc_gss_client { TAILQ_ENTRY(svc_rpc_gss_client) cl_link; TAILQ_ENTRY(svc_rpc_gss_client) cl_alllink; volatile u_int cl_refs; struct sx cl_lock; struct svc_rpc_gss_clientid cl_id; time_t cl_expiration; /* when to gc */ enum svc_rpc_gss_client_state cl_state; /* client state */ bool_t cl_locked; /* fixed service+qop */ gss_ctx_id_t cl_ctx; /* context id */ gss_cred_id_t 
cl_creds; /* delegated creds */ gss_name_t cl_cname; /* client name */ struct svc_rpc_gss_svc_name *cl_sname; /* server name used */ rpc_gss_rawcred_t cl_rawcred; /* raw credentials */ rpc_gss_ucred_t cl_ucred; /* unix-style credentials */ struct ucred *cl_cred; /* kernel-style credentials */ int cl_rpcflavor; /* RPC pseudo sec flavor */ bool_t cl_done_callback; /* TRUE after call */ void *cl_cookie; /* user cookie from callback */ gid_t cl_gid_storage[NGROUPS]; gss_OID cl_mech; /* mechanism */ gss_qop_t cl_qop; /* quality of protection */ uint32_t cl_seqlast; /* sequence window origin */ uint32_t cl_seqmask[SVC_RPC_GSS_SEQWINDOW/32]; /* bitmask of seqnums */ }; TAILQ_HEAD(svc_rpc_gss_client_list, svc_rpc_gss_client); /* * This structure holds enough information to unwrap arguments or wrap * results for a given request. We use the rq_clntcred area for this * (which is a per-request buffer). */ struct svc_rpc_gss_cookedcred { struct svc_rpc_gss_client *cc_client; rpc_gss_service_t cc_service; uint32_t cc_seq; }; #define CLIENT_HASH_SIZE 256 #define CLIENT_MAX 1024 u_int svc_rpc_gss_client_max = CLIENT_MAX; u_int svc_rpc_gss_client_hash_size = CLIENT_HASH_SIZE; SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "RPC"); SYSCTL_NODE(_kern_rpc, OID_AUTO, gss, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GSS"); SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_max, CTLFLAG_RW, &svc_rpc_gss_client_max, 0, "Max number of rpc-gss clients"); SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_hash, CTLFLAG_RDTUN, &svc_rpc_gss_client_hash_size, 0, "Size of rpc-gss client hash table"); static u_int svc_rpc_gss_lifetime_max = 0; SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, lifetime_max, CTLFLAG_RW, &svc_rpc_gss_lifetime_max, 0, "Maximum lifetime (seconds) of rpc-gss clients"); static u_int svc_rpc_gss_client_count; SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_count, CTLFLAG_RD, &svc_rpc_gss_client_count, 0, "Number of rpc-gss clients"); struct svc_rpc_gss_client_list *svc_rpc_gss_client_hash; 
struct svc_rpc_gss_client_list svc_rpc_gss_clients; static uint32_t svc_rpc_gss_next_clientid = 1; static void svc_rpc_gss_init(void *arg) { int i; svc_rpc_gss_client_hash = mem_alloc(sizeof(struct svc_rpc_gss_client_list) * svc_rpc_gss_client_hash_size); for (i = 0; i < svc_rpc_gss_client_hash_size; i++) TAILQ_INIT(&svc_rpc_gss_client_hash[i]); TAILQ_INIT(&svc_rpc_gss_clients); svc_auth_reg(RPCSEC_GSS, svc_rpc_gss, rpc_gss_svc_getcred); sx_init(&svc_rpc_gss_lock, "gsslock"); } SYSINIT(svc_rpc_gss_init, SI_SUB_KMEM, SI_ORDER_ANY, svc_rpc_gss_init, NULL); bool_t rpc_gss_set_callback(rpc_gss_callback_t *cb) { struct svc_rpc_gss_callback *scb; scb = mem_alloc(sizeof(struct svc_rpc_gss_callback)); if (!scb) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } scb->cb_callback = *cb; sx_xlock(&svc_rpc_gss_lock); SLIST_INSERT_HEAD(&svc_rpc_gss_callbacks, scb, cb_link); sx_xunlock(&svc_rpc_gss_lock); return (TRUE); } void rpc_gss_clear_callback(rpc_gss_callback_t *cb) { struct svc_rpc_gss_callback *scb; sx_xlock(&svc_rpc_gss_lock); SLIST_FOREACH(scb, &svc_rpc_gss_callbacks, cb_link) { if (scb->cb_callback.program == cb->program && scb->cb_callback.version == cb->version && scb->cb_callback.callback == cb->callback) { SLIST_REMOVE(&svc_rpc_gss_callbacks, scb, svc_rpc_gss_callback, cb_link); sx_xunlock(&svc_rpc_gss_lock); mem_free(scb, sizeof(*scb)); return; } } sx_xunlock(&svc_rpc_gss_lock); } static bool_t rpc_gss_acquire_svc_cred(struct svc_rpc_gss_svc_name *sname) { OM_uint32 maj_stat, min_stat; gss_buffer_desc namebuf; gss_name_t name; gss_OID_set_desc oid_set; oid_set.count = 1; oid_set.elements = sname->sn_mech; namebuf.value = (void *) sname->sn_principal; namebuf.length = strlen(sname->sn_principal); maj_stat = gss_import_name(&min_stat, &namebuf, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) return (FALSE); if (sname->sn_cred != GSS_C_NO_CREDENTIAL) gss_release_cred(&min_stat, &sname->sn_cred); maj_stat = 
gss_acquire_cred(&min_stat, name, sname->sn_req_time, &oid_set, GSS_C_ACCEPT, &sname->sn_cred, NULL, NULL); if (maj_stat != GSS_S_COMPLETE) { gss_release_name(&min_stat, &name); return (FALSE); } gss_release_name(&min_stat, &name); return (TRUE); } bool_t rpc_gss_set_svc_name(const char *principal, const char *mechanism, u_int req_time, u_int program, u_int version) { struct svc_rpc_gss_svc_name *sname; gss_OID mech_oid; if (!rpc_gss_mech_to_oid(mechanism, &mech_oid)) return (FALSE); sname = mem_alloc(sizeof(*sname)); if (!sname) return (FALSE); sname->sn_principal = strdup(principal, M_RPC); sname->sn_mech = mech_oid; sname->sn_req_time = req_time; sname->sn_cred = GSS_C_NO_CREDENTIAL; sname->sn_program = program; sname->sn_version = version; if (!rpc_gss_acquire_svc_cred(sname)) { free(sname->sn_principal, M_RPC); mem_free(sname, sizeof(*sname)); return (FALSE); } sx_xlock(&svc_rpc_gss_lock); SLIST_INSERT_HEAD(&svc_rpc_gss_svc_names, sname, sn_link); sx_xunlock(&svc_rpc_gss_lock); return (TRUE); } void rpc_gss_clear_svc_name(u_int program, u_int version) { OM_uint32 min_stat; struct svc_rpc_gss_svc_name *sname; sx_xlock(&svc_rpc_gss_lock); SLIST_FOREACH(sname, &svc_rpc_gss_svc_names, sn_link) { if (sname->sn_program == program && sname->sn_version == version) { SLIST_REMOVE(&svc_rpc_gss_svc_names, sname, svc_rpc_gss_svc_name, sn_link); sx_xunlock(&svc_rpc_gss_lock); gss_release_cred(&min_stat, &sname->sn_cred); free(sname->sn_principal, M_RPC); mem_free(sname, sizeof(*sname)); return; } } sx_xunlock(&svc_rpc_gss_lock); } bool_t rpc_gss_get_principal_name(rpc_gss_principal_t *principal, const char *mech, const char *name, const char *node, const char *domain) { OM_uint32 maj_stat, min_stat; gss_OID mech_oid; size_t namelen; gss_buffer_desc buf; gss_name_t gss_name, gss_mech_name; rpc_gss_principal_t result; if (!rpc_gss_mech_to_oid(mech, &mech_oid)) return (FALSE); /* * Construct a gss_buffer containing the full name formatted * as "name/node@domain" where node 
and domain are optional. */ namelen = strlen(name) + 1; if (node) { namelen += strlen(node) + 1; } if (domain) { namelen += strlen(domain) + 1; } buf.value = mem_alloc(namelen); buf.length = namelen; strcpy((char *) buf.value, name); if (node) { strcat((char *) buf.value, "/"); strcat((char *) buf.value, node); } if (domain) { strcat((char *) buf.value, "@"); strcat((char *) buf.value, domain); } /* * Convert that to a gss_name_t and then convert that to a * mechanism name in the selected mechanism. */ maj_stat = gss_import_name(&min_stat, &buf, GSS_C_NT_USER_NAME, &gss_name); mem_free(buf.value, buf.length); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_import_name", mech_oid, maj_stat, min_stat); return (FALSE); } maj_stat = gss_canonicalize_name(&min_stat, gss_name, mech_oid, &gss_mech_name); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_canonicalize_name", mech_oid, maj_stat, min_stat); gss_release_name(&min_stat, &gss_name); return (FALSE); } gss_release_name(&min_stat, &gss_name); /* * Export the mechanism name and use that to construct the * rpc_gss_principal_t result. 
*/ maj_stat = gss_export_name(&min_stat, gss_mech_name, &buf); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_export_name", mech_oid, maj_stat, min_stat); gss_release_name(&min_stat, &gss_mech_name); return (FALSE); } gss_release_name(&min_stat, &gss_mech_name); result = mem_alloc(sizeof(int) + buf.length); if (!result) { gss_release_buffer(&min_stat, &buf); return (FALSE); } result->len = buf.length; memcpy(result->name, buf.value, buf.length); gss_release_buffer(&min_stat, &buf); *principal = result; return (TRUE); } bool_t rpc_gss_getcred(struct svc_req *req, rpc_gss_rawcred_t **rcred, rpc_gss_ucred_t **ucred, void **cookie) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; if (req->rq_cred.oa_flavor != RPCSEC_GSS) return (FALSE); cc = req->rq_clntcred; client = cc->cc_client; if (rcred) *rcred = &client->cl_rawcred; if (ucred) *ucred = &client->cl_ucred; if (cookie) *cookie = client->cl_cookie; return (TRUE); } /* * This simpler interface is used by svc_getcred to copy the cred data * into a kernel cred structure. 
 */
/*
 * Translate the cached GSS-derived unix credentials for this request
 * into a kernel struct ucred.  Returns FALSE when the request did not
 * use RPCSEC_GSS; otherwise stores a held reference in *crp (caller
 * must crfree()) and, when flavorp is non-NULL, the pseudo security
 * flavor (RPCSEC_GSS_KRB5{,I,P} or RPCSEC_GSS).
 */
static int
rpc_gss_svc_getcred(struct svc_req *req, struct ucred **crp, int *flavorp)
{
	struct ucred *cr;
	struct svc_rpc_gss_cookedcred *cc;
	struct svc_rpc_gss_client *client;
	rpc_gss_ucred_t *uc;

	if (req->rq_cred.oa_flavor != RPCSEC_GSS)
		return (FALSE);

	cc = req->rq_clntcred;
	client = cc->cc_client;
	if (flavorp)
		*flavorp = client->cl_rpcflavor;

	/* Reuse the ucred built on a previous request for this client. */
	if (client->cl_cred) {
		*crp = crhold(client->cl_cred);
		return (TRUE);
	}

	uc = &client->cl_ucred;
	cr = client->cl_cred = crget();
	cr->cr_uid = cr->cr_ruid = cr->cr_svuid = uc->uid;
	cr->cr_rgid = cr->cr_svgid = uc->gid;
	crsetgroups(cr, uc->gidlen, uc->gidlist);
	/*
	 * Patch hunk: with VNET_NFSD, a server thread running inside a
	 * jail attributes the new cred to that jail's prison instead of
	 * unconditionally using prison0.  prison_hold() below balances
	 * the reference either way.
	 */
-	cr->cr_prison = &prison0;
+#ifdef VNET_NFSD
+	if (jailed(curthread->td_ucred))
+		cr->cr_prison = curthread->td_ucred->cr_prison;
+	else
+#endif
+		cr->cr_prison = &prison0;
	prison_hold(cr->cr_prison);
	*crp = crhold(cr);
	return (TRUE);
}

/*
 * Report the largest request payload that fits in max_tp_unit_len
 * bytes once wrapped for this client's service level (0 on error).
 */
int
rpc_gss_svc_max_data_length(struct svc_req *req, int max_tp_unit_len)
{
	struct svc_rpc_gss_cookedcred *cc = req->rq_clntcred;
	struct svc_rpc_gss_client *client = cc->cc_client;
	int want_conf;
	OM_uint32 max;
	OM_uint32 maj_stat, min_stat;
	int result;

	switch (client->cl_rawcred.service) {
	case rpc_gss_svc_none:
		/* No wrapping at all - the transport limit applies as-is. */
		return (max_tp_unit_len);
		break;

	case rpc_gss_svc_default:
	case rpc_gss_svc_integrity:
		want_conf = FALSE;
		break;

	case rpc_gss_svc_privacy:
		want_conf = TRUE;
		break;

	default:
		return (0);
	}

	maj_stat = gss_wrap_size_limit(&min_stat, client->cl_ctx,
	    want_conf, client->cl_qop, max_tp_unit_len, &max);

	if (maj_stat == GSS_S_COMPLETE) {
		result = (int) max;
		if (result < 0)
			result = 0;
		return (result);
	} else {
		rpc_gss_log_status("gss_wrap_size_limit", client->cl_mech,
		    maj_stat, min_stat);
		return (0);
	}
}

/*
 * Look up a client by the handle the initiator sent back to us.
 * Rejects handles minted by another host or before the last reboot.
 * On success the client is moved to the front of the LRU list and
 * returned with an extra reference for the caller.
 */
static struct svc_rpc_gss_client *
svc_rpc_gss_find_client(struct svc_rpc_gss_clientid *id)
{
	struct svc_rpc_gss_client *client;
	struct svc_rpc_gss_client_list *list;
	struct timeval boottime;
	unsigned long hostid;

	rpc_gss_log_debug("in svc_rpc_gss_find_client(%d)", id->ci_id);

	getcredhostid(curthread->td_ucred, &hostid);
getboottime(&boottime); if (id->ci_hostid != hostid || id->ci_boottime != boottime.tv_sec) return (NULL); list = &svc_rpc_gss_client_hash[id->ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_FOREACH(client, list, cl_link) { if (client->cl_id.ci_id == id->ci_id) { /* * Move this client to the front of the LRU * list. */ TAILQ_REMOVE(&svc_rpc_gss_clients, client, cl_alllink); TAILQ_INSERT_HEAD(&svc_rpc_gss_clients, client, cl_alllink); refcount_acquire(&client->cl_refs); break; } } sx_xunlock(&svc_rpc_gss_lock); return (client); } static struct svc_rpc_gss_client * svc_rpc_gss_create_client(void) { struct svc_rpc_gss_client *client; struct svc_rpc_gss_client_list *list; struct timeval boottime; unsigned long hostid; rpc_gss_log_debug("in svc_rpc_gss_create_client()"); client = mem_alloc(sizeof(struct svc_rpc_gss_client)); memset(client, 0, sizeof(struct svc_rpc_gss_client)); /* * Set the initial value of cl_refs to two. One for the caller * and the other to hold onto the client structure until it expires. */ refcount_init(&client->cl_refs, 2); sx_init(&client->cl_lock, "GSS-client"); getcredhostid(curthread->td_ucred, &hostid); client->cl_id.ci_hostid = hostid; getboottime(&boottime); client->cl_id.ci_boottime = boottime.tv_sec; client->cl_id.ci_id = svc_rpc_gss_next_clientid++; /* * Start the client off with a short expiration time. We will * try to get a saner value from the client creds later. 
*/ client->cl_state = CLIENT_NEW; client->cl_locked = FALSE; client->cl_expiration = time_uptime + 5*60; list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_INSERT_HEAD(list, client, cl_link); TAILQ_INSERT_HEAD(&svc_rpc_gss_clients, client, cl_alllink); svc_rpc_gss_client_count++; sx_xunlock(&svc_rpc_gss_lock); return (client); } static void svc_rpc_gss_destroy_client(struct svc_rpc_gss_client *client) { OM_uint32 min_stat; rpc_gss_log_debug("in svc_rpc_gss_destroy_client()"); if (client->cl_ctx) gss_delete_sec_context(&min_stat, &client->cl_ctx, GSS_C_NO_BUFFER); if (client->cl_cname) gss_release_name(&min_stat, &client->cl_cname); if (client->cl_rawcred.client_principal) mem_free(client->cl_rawcred.client_principal, sizeof(*client->cl_rawcred.client_principal) + client->cl_rawcred.client_principal->len); if (client->cl_cred) crfree(client->cl_cred); sx_destroy(&client->cl_lock); mem_free(client, sizeof(*client)); } /* * Drop a reference to a client and free it if that was the last reference. */ static void svc_rpc_gss_release_client(struct svc_rpc_gss_client *client) { if (!refcount_release(&client->cl_refs)) return; svc_rpc_gss_destroy_client(client); } /* * Remove a client from our global lists. * Must be called with svc_rpc_gss_lock held. */ static void svc_rpc_gss_forget_client_locked(struct svc_rpc_gss_client *client) { struct svc_rpc_gss_client_list *list; sx_assert(&svc_rpc_gss_lock, SX_XLOCKED); list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; TAILQ_REMOVE(list, client, cl_link); TAILQ_REMOVE(&svc_rpc_gss_clients, client, cl_alllink); svc_rpc_gss_client_count--; } /* * Remove a client from our global lists and free it if we can. 
 */
static void
svc_rpc_gss_forget_client(struct svc_rpc_gss_client *client)
{
	struct svc_rpc_gss_client_list *list;
	struct svc_rpc_gss_client *tclient;

	list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size];
	sx_xlock(&svc_rpc_gss_lock);
	TAILQ_FOREACH(tclient, list, cl_link) {
		/*
		 * Make sure this client has not already been removed
		 * from the lists by svc_rpc_gss_forget_client() or
		 * svc_rpc_gss_forget_client_locked().
		 */
		if (client == tclient) {
			svc_rpc_gss_forget_client_locked(client);
			sx_xunlock(&svc_rpc_gss_lock);
			/* Drop the list's reference taken at creation. */
			svc_rpc_gss_release_client(client);
			return;
		}
	}
	sx_xunlock(&svc_rpc_gss_lock);
}

/*
 * Garbage-collect clients: first trim the LRU tail until we are back
 * under svc_rpc_gss_client_max, then expire stale or timed-out
 * entries.  The lock is dropped around each release because
 * destroying a context may sleep; the scan restarts ("goto again")
 * since the list may have changed while unlocked.
 */
static void
svc_rpc_gss_timeout_clients(void)
{
	struct svc_rpc_gss_client *client;
	time_t now = time_uptime;

	rpc_gss_log_debug("in svc_rpc_gss_timeout_clients()");

	/*
	 * First enforce the max client limit. We keep
	 * svc_rpc_gss_clients in LRU order.
	 */
	sx_xlock(&svc_rpc_gss_lock);
	client = TAILQ_LAST(&svc_rpc_gss_clients, svc_rpc_gss_client_list);
	while (svc_rpc_gss_client_count > svc_rpc_gss_client_max && client != NULL) {
		svc_rpc_gss_forget_client_locked(client);
		sx_xunlock(&svc_rpc_gss_lock);
		svc_rpc_gss_release_client(client);
		sx_xlock(&svc_rpc_gss_lock);
		client = TAILQ_LAST(&svc_rpc_gss_clients,
		    svc_rpc_gss_client_list);
	}
again:
	TAILQ_FOREACH(client, &svc_rpc_gss_clients, cl_alllink) {
		if (client->cl_state == CLIENT_STALE
		    || now > client->cl_expiration) {
			svc_rpc_gss_forget_client_locked(client);
			sx_xunlock(&svc_rpc_gss_lock);
			rpc_gss_log_debug("expiring client %p", client);
			svc_rpc_gss_release_client(client);
			sx_xlock(&svc_rpc_gss_lock);
			goto again;
		}
	}
	sx_xunlock(&svc_rpc_gss_lock);
}

#ifdef DEBUG
/*
 * OID<->string routines.  These are uuuuugly.
*/ static OM_uint32 gss_oid_to_str(OM_uint32 *minor_status, gss_OID oid, gss_buffer_t oid_str) { char numstr[128]; unsigned long number; int numshift; size_t string_length; size_t i; unsigned char *cp; char *bp; /* Decoded according to krb5/gssapi_krb5.c */ /* First determine the size of the string */ string_length = 0; number = 0; numshift = 0; cp = (unsigned char *) oid->elements; number = (unsigned long) cp[0]; sprintf(numstr, "%ld ", number/40); string_length += strlen(numstr); sprintf(numstr, "%ld ", number%40); string_length += strlen(numstr); for (i=1; ilength; i++) { if ( (size_t) (numshift+7) < (sizeof(unsigned long)*8)) { number = (number << 7) | (cp[i] & 0x7f); numshift += 7; } else { *minor_status = 0; return(GSS_S_FAILURE); } if ((cp[i] & 0x80) == 0) { sprintf(numstr, "%ld ", number); string_length += strlen(numstr); number = 0; numshift = 0; } } /* * If we get here, we've calculated the length of "n n n ... n ". Add 4 * here for "{ " and "}\0". */ string_length += 4; if ((bp = malloc(string_length, M_GSSAPI, M_WAITOK | M_ZERO))) { strcpy(bp, "{ "); number = (unsigned long) cp[0]; sprintf(numstr, "%ld ", number/40); strcat(bp, numstr); sprintf(numstr, "%ld ", number%40); strcat(bp, numstr); number = 0; cp = (unsigned char *) oid->elements; for (i=1; ilength; i++) { number = (number << 7) | (cp[i] & 0x7f); if ((cp[i] & 0x80) == 0) { sprintf(numstr, "%ld ", number); strcat(bp, numstr); number = 0; } } strcat(bp, "}"); oid_str->length = strlen(bp)+1; oid_str->value = (void *) bp; *minor_status = 0; return(GSS_S_COMPLETE); } *minor_status = 0; return(GSS_S_FAILURE); } #endif static void svc_rpc_gss_build_ucred(struct svc_rpc_gss_client *client, const gss_name_t name) { OM_uint32 maj_stat, min_stat; rpc_gss_ucred_t *uc = &client->cl_ucred; int numgroups; uc->uid = 65534; uc->gid = 65534; uc->gidlist = client->cl_gid_storage; numgroups = NGROUPS; maj_stat = gss_pname_to_unix_cred(&min_stat, name, client->cl_mech, &uc->uid, &uc->gid, &numgroups, 
&uc->gidlist[0]); if (GSS_ERROR(maj_stat)) uc->gidlen = 0; else uc->gidlen = numgroups; } static void svc_rpc_gss_set_flavor(struct svc_rpc_gss_client *client) { static gss_OID_desc krb5_mech_oid = {9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" }; /* * Attempt to translate mech type and service into a * 'pseudo flavor'. Hardwire in krb5 support for now. */ if (kgss_oid_equal(client->cl_mech, &krb5_mech_oid)) { switch (client->cl_rawcred.service) { case rpc_gss_svc_default: case rpc_gss_svc_none: client->cl_rpcflavor = RPCSEC_GSS_KRB5; break; case rpc_gss_svc_integrity: client->cl_rpcflavor = RPCSEC_GSS_KRB5I; break; case rpc_gss_svc_privacy: client->cl_rpcflavor = RPCSEC_GSS_KRB5P; break; } } else { client->cl_rpcflavor = RPCSEC_GSS; } } static bool_t svc_rpc_gss_accept_sec_context(struct svc_rpc_gss_client *client, struct svc_req *rqst, struct rpc_gss_init_res *gr, struct rpc_gss_cred *gc) { gss_buffer_desc recv_tok; gss_OID mech; OM_uint32 maj_stat = 0, min_stat = 0, ret_flags; OM_uint32 cred_lifetime; struct svc_rpc_gss_svc_name *sname; rpc_gss_log_debug("in svc_rpc_gss_accept_context()"); /* Deserialize arguments. */ memset(&recv_tok, 0, sizeof(recv_tok)); if (!svc_getargs(rqst, (xdrproc_t) xdr_gss_buffer_desc, (caddr_t) &recv_tok)) { client->cl_state = CLIENT_STALE; return (FALSE); } /* * First time round, try all the server names we have until * one matches. Afterwards, stick with that one. */ sx_xlock(&svc_rpc_gss_lock); if (!client->cl_sname) { SLIST_FOREACH(sname, &svc_rpc_gss_svc_names, sn_link) { if (sname->sn_program == rqst->rq_prog && sname->sn_version == rqst->rq_vers) { retry: gr->gr_major = gss_accept_sec_context( &gr->gr_minor, &client->cl_ctx, sname->sn_cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &client->cl_cname, &mech, &gr->gr_token, &ret_flags, &cred_lifetime, &client->cl_creds); if (gr->gr_major == GSS_S_CREDENTIALS_EXPIRED) { /* * Either our creds really did * expire or gssd was * restarted. 
*/ if (rpc_gss_acquire_svc_cred(sname)) goto retry; } client->cl_sname = sname; break; } } if (!sname) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_tok); sx_xunlock(&svc_rpc_gss_lock); return (FALSE); } } else { gr->gr_major = gss_accept_sec_context( &gr->gr_minor, &client->cl_ctx, client->cl_sname->sn_cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &client->cl_cname, &mech, &gr->gr_token, &ret_flags, &cred_lifetime, NULL); } sx_xunlock(&svc_rpc_gss_lock); xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_tok); /* * If we get an error from gss_accept_sec_context, send the * reply anyway so that the client gets a chance to see what * is wrong. */ if (gr->gr_major != GSS_S_COMPLETE && gr->gr_major != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("accept_sec_context", client->cl_mech, gr->gr_major, gr->gr_minor); client->cl_state = CLIENT_STALE; return (TRUE); } gr->gr_handle.value = &client->cl_id; gr->gr_handle.length = sizeof(client->cl_id); gr->gr_win = SVC_RPC_GSS_SEQWINDOW; /* Save client info. */ client->cl_mech = mech; client->cl_qop = GSS_C_QOP_DEFAULT; client->cl_done_callback = FALSE; if (gr->gr_major == GSS_S_COMPLETE) { gss_buffer_desc export_name; /* * Change client expiration time to be near when the * client creds expire (or 24 hours if we can't figure * that out). */ if (cred_lifetime == GSS_C_INDEFINITE) cred_lifetime = 24*60*60; /* * Cap cred_lifetime if sysctl kern.rpc.gss.lifetime_max is set. */ if (svc_rpc_gss_lifetime_max > 0 && cred_lifetime > svc_rpc_gss_lifetime_max) cred_lifetime = svc_rpc_gss_lifetime_max; client->cl_expiration = time_uptime + cred_lifetime; /* * Fill in cred details in the rawcred structure. 
*/ client->cl_rawcred.version = RPCSEC_GSS_VERSION; rpc_gss_oid_to_mech(mech, &client->cl_rawcred.mechanism); maj_stat = gss_export_name(&min_stat, client->cl_cname, &export_name); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_export_name", client->cl_mech, maj_stat, min_stat); return (FALSE); } client->cl_rawcred.client_principal = mem_alloc(sizeof(*client->cl_rawcred.client_principal) + export_name.length); client->cl_rawcred.client_principal->len = export_name.length; memcpy(client->cl_rawcred.client_principal->name, export_name.value, export_name.length); gss_release_buffer(&min_stat, &export_name); client->cl_rawcred.svc_principal = client->cl_sname->sn_principal; client->cl_rawcred.service = gc->gc_svc; /* * Use gss_pname_to_uid to map to unix creds. For * kerberos5, this uses krb5_aname_to_localname. */ svc_rpc_gss_build_ucred(client, client->cl_cname); svc_rpc_gss_set_flavor(client); gss_release_name(&min_stat, &client->cl_cname); #ifdef DEBUG { gss_buffer_desc mechname; gss_oid_to_str(&min_stat, mech, &mechname); rpc_gss_log_debug("accepted context for %s with " "", client->cl_rawcred.client_principal->name, mechname.length, (char *)mechname.value, client->cl_qop, client->cl_rawcred.service); gss_release_buffer(&min_stat, &mechname); } #endif /* DEBUG */ } return (TRUE); } static bool_t svc_rpc_gss_validate(struct svc_rpc_gss_client *client, struct rpc_msg *msg, gss_qop_t *qop, rpc_gss_proc_t gcproc) { struct opaque_auth *oa; gss_buffer_desc rpcbuf, checksum; OM_uint32 maj_stat, min_stat; gss_qop_t qop_state; int32_t rpchdr[128 / sizeof(int32_t)]; int32_t *buf; rpc_gss_log_debug("in svc_rpc_gss_validate()"); memset(rpchdr, 0, sizeof(rpchdr)); /* Reconstruct RPC header for signing (from xdr_callmsg). 
*/ buf = rpchdr; IXDR_PUT_LONG(buf, msg->rm_xid); IXDR_PUT_ENUM(buf, msg->rm_direction); IXDR_PUT_LONG(buf, msg->rm_call.cb_rpcvers); IXDR_PUT_LONG(buf, msg->rm_call.cb_prog); IXDR_PUT_LONG(buf, msg->rm_call.cb_vers); IXDR_PUT_LONG(buf, msg->rm_call.cb_proc); oa = &msg->rm_call.cb_cred; IXDR_PUT_ENUM(buf, oa->oa_flavor); IXDR_PUT_LONG(buf, oa->oa_length); if (oa->oa_length) { memcpy((caddr_t)buf, oa->oa_base, oa->oa_length); buf += RNDUP(oa->oa_length) / sizeof(int32_t); } rpcbuf.value = rpchdr; rpcbuf.length = (u_char *)buf - (u_char *)rpchdr; checksum.value = msg->rm_call.cb_verf.oa_base; checksum.length = msg->rm_call.cb_verf.oa_length; maj_stat = gss_verify_mic(&min_stat, client->cl_ctx, &rpcbuf, &checksum, &qop_state); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_verify_mic", client->cl_mech, maj_stat, min_stat); /* * A bug in some versions of the Linux client generates a * Destroy operation with a bogus encrypted checksum. Deleting * the credential handle for that case causes the mount to fail. * Since the checksum is bogus (gss_verify_mic() failed), it * doesn't make sense to destroy the handle and not doing so * fixes the Linux mount. 
*/ if (gcproc != RPCSEC_GSS_DESTROY) client->cl_state = CLIENT_STALE; return (FALSE); } *qop = qop_state; return (TRUE); } static bool_t svc_rpc_gss_nextverf(struct svc_rpc_gss_client *client, struct svc_req *rqst, u_int seq) { gss_buffer_desc signbuf; gss_buffer_desc mic; OM_uint32 maj_stat, min_stat; uint32_t nseq; rpc_gss_log_debug("in svc_rpc_gss_nextverf()"); nseq = htonl(seq); signbuf.value = &nseq; signbuf.length = sizeof(nseq); maj_stat = gss_get_mic(&min_stat, client->cl_ctx, client->cl_qop, &signbuf, &mic); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_get_mic", client->cl_mech, maj_stat, min_stat); client->cl_state = CLIENT_STALE; return (FALSE); } KASSERT(mic.length <= MAX_AUTH_BYTES, ("MIC too large for RPCSEC_GSS")); rqst->rq_verf.oa_flavor = RPCSEC_GSS; rqst->rq_verf.oa_length = mic.length; bcopy(mic.value, rqst->rq_verf.oa_base, mic.length); gss_release_buffer(&min_stat, &mic); return (TRUE); } static bool_t svc_rpc_gss_callback(struct svc_rpc_gss_client *client, struct svc_req *rqst) { struct svc_rpc_gss_callback *scb; rpc_gss_lock_t lock; void *cookie; bool_t cb_res; bool_t result; /* * See if we have a callback for this guy. */ result = TRUE; SLIST_FOREACH(scb, &svc_rpc_gss_callbacks, cb_link) { if (scb->cb_callback.program == rqst->rq_prog && scb->cb_callback.version == rqst->rq_vers) { /* * This one matches. Call the callback and see * if it wants to veto or something. */ lock.locked = FALSE; lock.raw_cred = &client->cl_rawcred; cb_res = scb->cb_callback.callback(rqst, client->cl_creds, client->cl_ctx, &lock, &cookie); if (!cb_res) { client->cl_state = CLIENT_STALE; result = FALSE; break; } /* * The callback accepted the connection - it * is responsible for freeing client->cl_creds * now. */ client->cl_creds = GSS_C_NO_CREDENTIAL; client->cl_locked = lock.locked; client->cl_cookie = cookie; return (TRUE); } } /* * Either no callback exists for this program/version or one * of the callbacks rejected the connection. 
 * We just need to
 * clean up the delegated client creds, if any.
 */
	if (client->cl_creds) {
		OM_uint32 min_ver;

		gss_release_cred(&min_ver, &client->cl_creds);
	}

	return (result);
}

/*
 * RFC 2203 replay protection.  Compare seq against the sliding window
 * anchored at cl_seqlast: requests older than the window, or whose
 * window bit is already set (a duplicate), are rejected (FALSE) so the
 * caller silently drops them.  Runs under cl_lock.
 */
static bool_t
svc_rpc_gss_check_replay(struct svc_rpc_gss_client *client, uint32_t seq)
{
	uint32_t offset;
	int word, bit;
	bool_t result;

	sx_xlock(&client->cl_lock);
	if (seq <= client->cl_seqlast) {
		/*
		 * The request sequence number is less than
		 * the largest we have seen so far. If it is
		 * outside the window or if we have seen a
		 * request with this sequence before, silently
		 * discard it.
		 */
		offset = client->cl_seqlast - seq;
		if (offset >= SVC_RPC_GSS_SEQWINDOW) {
			result = FALSE;
			goto out;
		}
		word = offset / 32;
		bit = offset % 32;
		if (client->cl_seqmask[word] & (1 << bit)) {
			result = FALSE;
			goto out;
		}
	}

	result = TRUE;
out:
	sx_xunlock(&client->cl_lock);
	return (result);
}

/*
 * Record seq in the replay window after the request's MIC has been
 * verified, shifting the bitmask when seq advances cl_seqlast.
 */
static void
svc_rpc_gss_update_seq(struct svc_rpc_gss_client *client, uint32_t seq)
{
	int offset, i, word, bit;
	uint32_t carry, newcarry;

	sx_xlock(&client->cl_lock);
	if (seq > client->cl_seqlast) {
		/*
		 * This request has a sequence number greater
		 * than any we have seen so far.
Advance the * seq window and set bit zero of the window * (which corresponds to the new sequence * number) */ offset = seq - client->cl_seqlast; while (offset > 32) { for (i = (SVC_RPC_GSS_SEQWINDOW / 32) - 1; i > 0; i--) { client->cl_seqmask[i] = client->cl_seqmask[i-1]; } client->cl_seqmask[0] = 0; offset -= 32; } carry = 0; for (i = 0; i < SVC_RPC_GSS_SEQWINDOW / 32; i++) { newcarry = client->cl_seqmask[i] >> (32 - offset); client->cl_seqmask[i] = (client->cl_seqmask[i] << offset) | carry; carry = newcarry; } client->cl_seqmask[0] |= 1; client->cl_seqlast = seq; } else { offset = client->cl_seqlast - seq; word = offset / 32; bit = offset % 32; client->cl_seqmask[word] |= (1 << bit); } sx_xunlock(&client->cl_lock); } enum auth_stat svc_rpc_gss(struct svc_req *rqst, struct rpc_msg *msg) { OM_uint32 min_stat; XDR xdrs; struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; struct rpc_gss_cred gc; struct rpc_gss_init_res gr; gss_qop_t qop; int call_stat; enum auth_stat result; rpc_gss_log_debug("in svc_rpc_gss()"); /* Garbage collect old clients. */ svc_rpc_gss_timeout_clients(); /* Initialize reply. */ rqst->rq_verf = _null_auth; /* Deserialize client credentials. */ if (rqst->rq_cred.oa_length <= 0) return (AUTH_BADCRED); memset(&gc, 0, sizeof(gc)); xdrmem_create(&xdrs, rqst->rq_cred.oa_base, rqst->rq_cred.oa_length, XDR_DECODE); if (!xdr_rpc_gss_cred(&xdrs, &gc)) { XDR_DESTROY(&xdrs); return (AUTH_BADCRED); } XDR_DESTROY(&xdrs); client = NULL; /* Check version. 
 */
	if (gc.gc_version != RPCSEC_GSS_VERSION) {
		result = AUTH_BADCRED;
		goto out;
	}

	/* Check the proc and find the client (or create it) */
	if (gc.gc_proc == RPCSEC_GSS_INIT) {
		/* INIT must arrive with an empty context handle. */
		if (gc.gc_handle.length != 0) {
			result = AUTH_BADCRED;
			goto out;
		}
		client = svc_rpc_gss_create_client();
	} else {
		struct svc_rpc_gss_clientid *p;
		if (gc.gc_handle.length != sizeof(*p)) {
			result = AUTH_BADCRED;
			goto out;
		}
		p = gc.gc_handle.value;
		client = svc_rpc_gss_find_client(p);
		if (!client) {
			/*
			 * Can't find the client - we may have
			 * destroyed it - tell the other side to
			 * re-authenticate.
			 */
			result = RPCSEC_GSS_CREDPROBLEM;
			goto out;
		}
	}
	cc = rqst->rq_clntcred;
	cc->cc_client = client;
	cc->cc_service = gc.gc_svc;
	cc->cc_seq = gc.gc_seq;

	/*
	 * The service and sequence number must be ignored for
	 * RPCSEC_GSS_INIT and RPCSEC_GSS_CONTINUE_INIT.
	 */
	if (gc.gc_proc != RPCSEC_GSS_INIT
	    && gc.gc_proc != RPCSEC_GSS_CONTINUE_INIT) {
		/*
		 * Check for sequence number overflow.
		 */
		if (gc.gc_seq >= MAXSEQ) {
			result = RPCSEC_GSS_CTXPROBLEM;
			goto out;
		}

		/*
		 * Check for valid service.
		 */
		if (gc.gc_svc != rpc_gss_svc_none &&
		    gc.gc_svc != rpc_gss_svc_integrity &&
		    gc.gc_svc != rpc_gss_svc_privacy) {
			result = AUTH_BADCRED;
			goto out;
		}
	}

	/* Handle RPCSEC_GSS control procedure. */
	switch (gc.gc_proc) {

	case RPCSEC_GSS_INIT:
	case RPCSEC_GSS_CONTINUE_INIT:
		/* Context establishment is only valid on the NULL proc. */
		if (rqst->rq_proc != NULLPROC) {
			result = AUTH_REJECTEDCRED;
			break;
		}

		memset(&gr, 0, sizeof(gr));

		if (!svc_rpc_gss_accept_sec_context(client, rqst, &gr, &gc)) {
			result = AUTH_REJECTEDCRED;
			break;
		}

		if (gr.gr_major == GSS_S_COMPLETE) {
			/*
			 * We borrow the space for the call verf to
			 * pack our reply verf.
			 */
			rqst->rq_verf = msg->rm_call.cb_verf;
			if (!svc_rpc_gss_nextverf(client, rqst, gr.gr_win)) {
				result = AUTH_REJECTEDCRED;
				break;
			}
		} else {
			rqst->rq_verf = _null_auth;
		}

		call_stat = svc_sendreply(rqst,
		    (xdrproc_t) xdr_rpc_gss_init_res,
		    (caddr_t) &gr);

		gss_release_buffer(&min_stat, &gr.gr_token);

		if (!call_stat) {
			result = AUTH_FAILED;
			break;
		}

		/* Mark the context established only after the reply
		 * carrying the handle has actually been sent. */
		if (gr.gr_major == GSS_S_COMPLETE)
			client->cl_state = CLIENT_ESTABLISHED;

		result = RPCSEC_GSS_NODISPATCH;
		break;

	case RPCSEC_GSS_DATA:
	case RPCSEC_GSS_DESTROY:
		/* Drop replays silently before doing any crypto work. */
		if (!svc_rpc_gss_check_replay(client, gc.gc_seq)) {
			result = RPCSEC_GSS_NODISPATCH;
			break;
		}

		if (!svc_rpc_gss_validate(client, msg, &qop, gc.gc_proc)) {
			result = RPCSEC_GSS_CREDPROBLEM;
			break;
		}

		/*
		 * We borrow the space for the call verf to pack our
		 * reply verf.
		 */
		rqst->rq_verf = msg->rm_call.cb_verf;
		if (!svc_rpc_gss_nextverf(client, rqst, gc.gc_seq)) {
			result = RPCSEC_GSS_CTXPROBLEM;
			break;
		}

		/* Only update the window after the verifier checked out. */
		svc_rpc_gss_update_seq(client, gc.gc_seq);

		/*
		 * Change the SVCAUTH ops on the request to point at
		 * our own code so that we can unwrap the arguments
		 * and wrap the result. The caller will re-set this on
		 * every request to point to a set of null wrap/unwrap
		 * methods. Acquire an extra reference to the client
		 * which will be released by svc_rpc_gss_release()
		 * after the request has finished processing.
		 */
		refcount_acquire(&client->cl_refs);
		rqst->rq_auth.svc_ah_ops = &svc_auth_gss_ops;
		rqst->rq_auth.svc_ah_private = cc;

		if (gc.gc_proc == RPCSEC_GSS_DATA) {
			/*
			 * We might be ready to do a callback to the server to
			 * see if it wants to accept/reject the connection.
			 */
			sx_xlock(&client->cl_lock);
			if (!client->cl_done_callback) {
				client->cl_done_callback = TRUE;
				client->cl_qop = qop;
				client->cl_rawcred.qop = _rpc_gss_num_to_qop(
					client->cl_rawcred.mechanism, qop);
				if (!svc_rpc_gss_callback(client, rqst)) {
					result = AUTH_REJECTEDCRED;
					sx_xunlock(&client->cl_lock);
					break;
				}
			}
			sx_xunlock(&client->cl_lock);

			/*
			 * If the server has locked this client to a
			 * particular service+qop pair, enforce that
			 * restriction now.
			 */
			if (client->cl_locked) {
				if (client->cl_rawcred.service != gc.gc_svc) {
					result = AUTH_FAILED;
					break;
				} else if (client->cl_qop != qop) {
					result = AUTH_BADVERF;
					break;
				}
			}

			/*
			 * If the qop changed, look up the new qop
			 * name for rawcred.
			 */
			if (client->cl_qop != qop) {
				client->cl_qop = qop;
				client->cl_rawcred.qop = _rpc_gss_num_to_qop(
					client->cl_rawcred.mechanism, qop);
			}

			/*
			 * Make sure we use the right service value
			 * for unwrap/wrap.
			 */
			if (client->cl_rawcred.service != gc.gc_svc) {
				client->cl_rawcred.service = gc.gc_svc;
				svc_rpc_gss_set_flavor(client);
			}

			result = AUTH_OK;
		} else {
			/* DESTROY is only valid on the NULL proc. */
			if (rqst->rq_proc != NULLPROC) {
				result = AUTH_REJECTEDCRED;
				break;
			}

			call_stat = svc_sendreply(rqst,
			    (xdrproc_t) xdr_void, (caddr_t) NULL);

			if (!call_stat) {
				result = AUTH_FAILED;
				break;
			}

			svc_rpc_gss_forget_client(client);

			result = RPCSEC_GSS_NODISPATCH;
			break;
		}
		break;

	default:
		result = AUTH_BADCRED;
		break;
	}
out:
	/* Drop the lookup/create reference taken above. */
	if (client)
		svc_rpc_gss_release_client(client);

	xdr_free((xdrproc_t) xdr_rpc_gss_cred, (char *) &gc);
	return (result);
}

/*
 * SVCAUTH wrap method: GSS-protect the outgoing reply body according
 * to the negotiated service (integrity or privacy).  A no-op for
 * svc_none, unestablished contexts, or an empty reply.
 */
static bool_t
svc_rpc_gss_wrap(SVCAUTH *auth, struct mbuf **mp)
{
	struct svc_rpc_gss_cookedcred *cc;
	struct svc_rpc_gss_client *client;

	rpc_gss_log_debug("in svc_rpc_gss_wrap()");

	cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private;
	client = cc->cc_client;
	if (client->cl_state != CLIENT_ESTABLISHED
	    || cc->cc_service == rpc_gss_svc_none || *mp == NULL) {
		return (TRUE);
	}

	return (xdr_rpc_gss_wrap_data(mp,
		client->cl_ctx, client->cl_qop,
		cc->cc_service, cc->cc_seq));
}

static bool_t
svc_rpc_gss_unwrap(SVCAUTH *auth, struct mbuf **mp)
{
	/* SVCAUTH unwrap method: verify/decrypt the incoming call
	 * arguments; a no-op for svc_none or unestablished contexts. */
	struct svc_rpc_gss_cookedcred *cc;
	struct svc_rpc_gss_client *client;

	rpc_gss_log_debug("in svc_rpc_gss_unwrap()");

	cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private;
	client = cc->cc_client;
	if (client->cl_state != CLIENT_ESTABLISHED
	    || cc->cc_service == rpc_gss_svc_none) {
		return (TRUE);
	}

	return (xdr_rpc_gss_unwrap_data(mp,
		client->cl_ctx, client->cl_qop,
		cc->cc_service, cc->cc_seq));
}

/*
 * SVCAUTH release method: drop the extra client reference taken in
 * svc_rpc_gss() when the request finishes processing.
 */
static void
svc_rpc_gss_release(SVCAUTH *auth)
{
	struct svc_rpc_gss_cookedcred *cc;
	struct svc_rpc_gss_client *client;

	rpc_gss_log_debug("in svc_rpc_gss_release()");

	cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private;
	client = cc->cc_client;
	svc_rpc_gss_release_client(client);
}
diff --git a/sys/rpc/svc.c b/sys/rpc/svc.c
index be0f08ebca9d..065da0cc23a4 100644
--- a/sys/rpc/svc.c
+++ b/sys/rpc/svc.c
@@ -1,1482 +1,1483 @@
/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)svc.c 2.4 88/08/11 4.0 RPCSRC"; #endif #include __FBSDID("$FreeBSD$"); /* * svc.c, Server-side remote procedure call interface. * * There are two sets of procedures here. The xprt routines are * for handling transport handles. The svc routines handle the * list of service routines. * * Copyright (C) 1984, Sun Microsystems, Inc. 
 */

#include
+#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* NOTE(review): the #include header names were lost in extraction;
 * the '+' line is this patch's addition (presumably <sys/jail.h> for
 * jailed() below — confirm against the original diff). */

#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)

static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
    char *);
static void svc_new_thread(SVCGROUP *grp);
static void xprt_unregister_locked(SVCXPRT *xprt);
static void svc_change_space_used(SVCPOOL *pool, long delta);
static bool_t svc_request_space_available(SVCPOOL *pool);
static void svcpool_cleanup(SVCPOOL *pool);

/* ***************  SVCXPRT related stuff **************** */

static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * Allocate and initialize a new service pool: locks, callout lists,
 * thread groups, request-space watermarks, and (optionally) a set of
 * sysctl nodes under 'sysctl_base' for tuning/observing the pool.
 */
SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;
	SVCGROUP *grp;
	int g;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_callouts);
	TAILQ_INIT(&pool->sp_lcallouts);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_groupcount = 1;
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
		grp->sg_pool = pool;
		grp->sg_state = SVCPOOL_ACTIVE;
		TAILQ_INIT(&grp->sg_xlist);
		TAILQ_INIT(&grp->sg_active);
		LIST_INIT(&grp->sg_idlethreads);
		grp->sg_minthreads = 1;
		grp->sg_maxthreads = 1;
	}

	/*
	 * Don't use more than a quarter of mbuf clusters.  Nota bene:
	 * nmbclusters is an int, but nmbclusters*MCLBYTES may overflow
	 * on LP64 architectures, so cast to u_long to avoid undefined
	 * behavior.  (ILP32 architectures cannot have nmbclusters
	 * large enough to overflow for other reasons.)
	 */
	pool->sp_space_high = (u_long)nmbclusters * MCLBYTES / 4;
	pool->sp_space_low = (pool->sp_space_high / 3) * 2;

	sysctl_ctx_init(&pool->sp_sysctl);
	/* Patch hunk: skip sysctl node creation for jailed creds, so a
	 * pool created inside a jail does not register host sysctls. */
-	if (sysctl_base) {
+	if (!jailed(curthread->td_ucred) && sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_minthread_sysctl, "I",
		    "Minimal number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_maxthread_sysctl, "I",
		    "Maximal number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_threads_sysctl, "I",
		    "Current number of threads");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
		    "Number of thread groups");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest,
		    "Highest space used since reboot.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return pool;
}

/*
 * Code common to svcpool_destroy() and svcpool_close(),
which cleans up
 * the pool data structures.
 */
static void
svcpool_cleanup(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svc_loss_callout *sl;
	struct svcxprt_list cleanup;
	int g;

	TAILQ_INIT(&cleanup);

	/* Unregister every transport under the group lock, then shut
	 * it down and release it outside the lock via 'cleanup'. */
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
		mtx_unlock(&grp->sg_lock);
	}
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		if (xprt->xp_socket != NULL)
			soshutdown(xprt->xp_socket, SHUT_WR);
		SVC_RELEASE(xprt);
	}

	/* Drop sp_lock around the unreg calls, which take it themselves. */
	mtx_lock(&pool->sp_lock);
	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_loss_unreg(pool, sl->slc_dispatch);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);
}

/*
 * Tear down a pool completely: clean up transports/callouts, destroy
 * locks and sysctls, and free the pool itself.
 */
void
svcpool_destroy(SVCPOOL *pool)
{
	SVCGROUP *grp;
	int g;

	svcpool_cleanup(pool);

	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_destroy(&grp->sg_lock);
	}
	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}

/*
 * Similar to svcpool_destroy(), except that it does not destroy the actual
 * data structures.  As such, "pool" may be used again.
 */
void
svcpool_close(SVCPOOL *pool)
{
	SVCGROUP *grp;
	int g;

	svcpool_cleanup(pool);

	/* Now, initialize the pool's state for a fresh svc_run() call.
 */
	mtx_lock(&pool->sp_lock);
	pool->sp_state = SVCPOOL_INIT;
	mtx_unlock(&pool->sp_lock);
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		grp->sg_state = SVCPOOL_ACTIVE;
		mtx_unlock(&grp->sg_lock);
	}
}

/*
 * Sysctl handler to get the present thread count on a pool
 */
static int
svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int threads, error, g;

	pool = oidp->oid_arg1;
	threads = 0;
	mtx_lock(&pool->sp_lock);
	/* Sum thread counts across all active groups. */
	for (g = 0; g < pool->sp_groupcount; g++)
		threads += pool->sp_groups[g].sg_threadcount;
	mtx_unlock(&pool->sp_lock);
	error = sysctl_handle_int(oidp, &threads, 0, req);
	return (error);
}

/*
 * Sysctl handler to set the minimum thread count on a pool
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newminthreads, error, g;

	pool = oidp->oid_arg1;
	newminthreads = pool->sp_minthreads;
	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
	if (error == 0 && newminthreads != pool->sp_minthreads) {
		/* Reject values above the current maximum. */
		if (newminthreads > pool->sp_maxthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		pool->sp_minthreads = newminthreads;
		/* Spread the pool minimum across groups, at least 1 each. */
		for (g = 0; g < pool->sp_groupcount; g++) {
			pool->sp_groups[g].sg_minthreads = max(1,
			    pool->sp_minthreads / pool->sp_groupcount);
		}
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Sysctl handler to set the maximum thread count on a pool
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newmaxthreads, error, g;

	pool = oidp->oid_arg1;
	newmaxthreads = pool->sp_maxthreads;
	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
		/* Reject values below the current minimum. */
		if (newmaxthreads < pool->sp_minthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		pool->sp_maxthreads = newmaxthreads;
		for (g = 0; g < pool->sp_groupcount; g++) {
			pool->sp_groups[g].sg_maxthreads = max(1,
			    pool->sp_maxthreads / pool->sp_groupcount);
		}
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Activate a transport handle.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCGROUP *grp;
	int g;

	SVC_ACQUIRE(xprt);
	/* Round-robin new transports across the pool's thread groups. */
	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
	xprt->xp_group = grp = &pool->sp_groups[g];
	mtx_lock(&grp->sg_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
	mtx_unlock(&grp->sg_lock);
}

/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}

void
xprt_unregister(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&grp->sg_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&grp->sg_lock);

	if (xprt->xp_socket != NULL)
		soshutdown(xprt->xp_socket, SHUT_WR);
	SVC_RELEASE(xprt);
}

/*
 * Attempt to assign a service thread to this transport.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;
	SVCTHREAD *st;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	st = LIST_FIRST(&grp->sg_idlethreads);
	if (st) {
		/* Hand the transport to an idle thread and wake it. */
		LIST_REMOVE(st, st_ilink);
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (grp->sg_state == SVCPOOL_ACTIVE
		    && grp->sg_lastcreatetime < time_uptime
		    && grp->sg_threadcount < grp->sg_maxthreads) {
			grp->sg_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}

/*
 * Mark a transport as having pending work; queue it on the group's
 * active list if no thread can take it immediately.
 */
void
xprt_active(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&grp->sg_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			if (!svc_request_space_available(xprt->xp_pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&grp->sg_lock);
}

void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	if (xprt->xp_active) {
		if (xprt->xp_thread == NULL)
			TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}

void
xprt_inactive(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&grp->sg_lock);
}

/*
 * Variant of xprt_inactive() for use only when sure that port is
 * assigned to thread. For example, within receive handlers.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}

/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when a rpc request for this
 * program number comes in.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* he is registering another xptr */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}

/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *s;

	/* unregister the information anyway */
	(void) rpcb_unset(prog, vers, NULL);
	mtx_lock(&pool->sp_lock);
	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
		if (s->sc_netid)
			/* NOTE(review): sizeof(s->sc_netid) is the pointer
			 * size, not the string length; the kernel mem_free
			 * ignores its size argument, so this is benign here. */
			mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
		mem_free(s, sizeof (struct svc_callout));
	}
	mtx_unlock(&pool->sp_lock);
}

/*
 * Add a service connection loss program to the callout list.
 * The dispatch routine will be called when some port in ths pool die.
 */
bool_t
svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch)
			break;
	}
	if (s != NULL) {
		/* Already registered; nothing to do. */
		mtx_unlock(&pool->sp_lock);
		return (TRUE);
	}
	s = malloc(sizeof(struct svc_loss_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s->slc_dispatch = dispatch;
	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
	mtx_unlock(&pool->sp_lock);
	return (TRUE);
}

/*
 * Remove a service connection loss program from the callout list.
 */
void
svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
{
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch) {
			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
			free(s, M_RPC);
			break;
		}
	}
	mtx_unlock(&pool->sp_lock);
}

/* ********************** CALLOUT list related stuff ************* */

/*
 * Search the callout list for a program number, return the callout
 * struct.
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
	struct svc_callout *s;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		/* A NULL netid on either side matches any netid. */
		if (s->sc_prog == prog && s->sc_vers == vers
		    && (netid == NULL || s->sc_netid == NULL ||
			strcmp(netid, s->sc_netid) == 0))
			break;
	}

	return (s);
}

/* ******************* REPLY GENERATION ROUTINES  ************ */

/*
 * Common tail for all reply paths: free the request args, record the
 * reply in the replay cache (if any), GSS-wrap the body, and hand the
 * finished reply to the transport.
 */
static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}

/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results,
    void * xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	/* Encode the results into a fresh mbuf chain. */
	m = m_getcl(M_WAITOK, MT_DATA, 0);
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}

/* Like svc_sendreply() but the results are already XDR-encoded in 'm'. */
bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}

/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	/* Thin wrapper: deny with AUTH_TOOWEAK. */
	svcerr_auth(rqstp, AUTH_TOOWEAK);
}

/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Allocate a new server transport structure. All fields are
 * initialized to zero and xp_p3 is initialized to point at an
 * extension structure to hold various flags and authentication
 * parameters.
 */
SVCXPRT *
svc_xprt_alloc(void)
{
	SVCXPRT *xprt;
	SVCXPRT_EXT *ext;

	xprt = mem_alloc(sizeof(SVCXPRT));
	ext = mem_alloc(sizeof(SVCXPRT_EXT));
	xprt->xp_p3 = ext;
	/* Caller holds the initial reference. */
	refcount_init(&xprt->xp_refs, 1);

	return (xprt);
}

/*
 * Free a server transport structure.
 */
void
svc_xprt_free(SVCXPRT *xprt)
{

	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
	/* The size argument is ignored, so 0 is ok. */
	mem_free(xprt->xp_gidp, 0);
	mem_free(xprt, sizeof(SVCXPRT));
}

/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed.
We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	struct svc_loss_callout *s;
	enum xprt_stat stat;

	/* now receive msgs from xprtprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	/* Carve the pre-allocated credential area into cred, verf and
	 * cooked-cred regions used by the auth flavors. */
	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				/* Cached reply: resend it and drop the
				 * request without dispatching. */
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody, &r->rq_reply_seq);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;

			default:
				m_freem(args);
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		/* Notify loss callouts and retire the dead transport. */
		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
			(*s->slc_dispatch)(xprt);
		xprt_unregister(xprt);
	}

	return (stat);
}

/*
 * Match a parsed request against the registered program/version
 * callouts and invoke the dispatch routine, or send the appropriate
 * PROG_UNAVAIL / PROG_MISMATCH error reply.
 */
static void
svc_executereq(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	SVCPOOL *pool = xprt->xp_pool;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	struct svc_callout *s;

	/* now match message with a registered service*/
	prog_found = FALSE;
	low_vers = (rpcvers_t) -1L;
	high_vers = (rpcvers_t) 0L;
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == rqstp->rq_prog) {
			if (s->sc_vers == rqstp->rq_vers) {
				/*
				 * We hand ownership of r to the
				 * dispatch method - they must call
				 * svc_freereq.
				 */
				(*s->sc_dispatch)(rqstp, xprt);
				return;
			}  /* found correct version */
			prog_found = TRUE;
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		}   /* found correct program */
	}

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found)
		svcerr_progvers(rqstp, low_vers, high_vers);
	else
		svcerr_noprog(rqstp);

	svc_freereq(rqstp);
}

/*
 * Scan a group's transports for ones idle past their timeout and
 * retire them.  Called with sg_lock held; drops and re-takes it
 * around the shutdown/release of the collected transports.
 */
static void
svc_checkidle(SVCGROUP *grp)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
*/ if (!xprt->xp_idletimeout || xprt->xp_thread) continue; timo = xprt->xp_lastactive + xprt->xp_idletimeout; if (time_uptime > timo) { xprt_unregister_locked(xprt); TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link); } } mtx_unlock(&grp->sg_lock); TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) { soshutdown(xprt->xp_socket, SHUT_WR); SVC_RELEASE(xprt); } mtx_lock(&grp->sg_lock); } static void svc_assign_waiting_sockets(SVCPOOL *pool) { SVCGROUP *grp; SVCXPRT *xprt; int g; for (g = 0; g < pool->sp_groupcount; g++) { grp = &pool->sp_groups[g]; mtx_lock(&grp->sg_lock); while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) { if (xprt_assignthread(xprt)) TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink); else break; } mtx_unlock(&grp->sg_lock); } } static void svc_change_space_used(SVCPOOL *pool, long delta) { unsigned long value; value = atomic_fetchadd_long(&pool->sp_space_used, delta) + delta; if (delta > 0) { if (value >= pool->sp_space_high && !pool->sp_space_throttled) { pool->sp_space_throttled = TRUE; pool->sp_space_throttle_count++; } if (value > pool->sp_space_used_highest) pool->sp_space_used_highest = value; } else { if (value < pool->sp_space_low && pool->sp_space_throttled) { pool->sp_space_throttled = FALSE; svc_assign_waiting_sockets(pool); } } } static bool_t svc_request_space_available(SVCPOOL *pool) { if (pool->sp_space_throttled) return (FALSE); return (TRUE); } static void svc_run_internal(SVCGROUP *grp, bool_t ismaster) { SVCPOOL *pool = grp->sg_pool; SVCTHREAD *st, *stpref; SVCXPRT *xprt; enum xprt_stat stat; struct svc_req *rqstp; struct proc *p; long sz; int error; st = mem_alloc(sizeof(*st)); mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF); st->st_pool = pool; st->st_xprt = NULL; STAILQ_INIT(&st->st_reqs); cv_init(&st->st_cond, "rpcsvc"); mtx_lock(&grp->sg_lock); /* * If we are a new thread which was spawned to cope with * increased load, set the state back to SVCPOOL_ACTIVE. 
*/
	if (grp->sg_state == SVCPOOL_THREADSTARTING)
		grp->sg_state = SVCPOOL_ACTIVE;

	while (grp->sg_state != SVCPOOL_CLOSING) {
		/*
		 * Create new thread if requested.
		 */
		if (grp->sg_state == SVCPOOL_THREADWANTED) {
			grp->sg_state = SVCPOOL_THREADSTARTING;
			grp->sg_lastcreatetime = time_uptime;
			mtx_unlock(&grp->sg_lock);
			svc_new_thread(grp);
			mtx_lock(&grp->sg_lock);
			continue;
		}

		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > grp->sg_lastidlecheck) {
			grp->sg_lastidlecheck = time_uptime;
			svc_checkidle(grp);
		}

		xprt = st->st_xprt;
		if (!xprt) {
			/*
			 * Enforce maxthreads count.
			 */
			if (!ismaster &&
			    grp->sg_threadcount > grp->sg_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool) &&
			    (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
				SVC_ACQUIRE(xprt);
				xprt->xp_thread = st;
				st->st_xprt = xprt;
				continue;
			}

			/*
			 * Nothing to do: go idle.  Threads above the
			 * group minimum (and the master) use a 5-second
			 * timed wait so surplus workers can retire.
			 */
			LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
			if (ismaster || (!ismaster &&
			    grp->sg_threadcount > grp->sg_minthreads))
				error = cv_timedwait_sig(&st->st_cond,
				    &grp->sg_lock, 5 * hz);
			else
				error = cv_wait_sig(&st->st_cond,
				    &grp->sg_lock);
			/*
			 * A waker that assigned us a transport also
			 * removed us from the idle list; only unlink
			 * ourselves if no transport was assigned.
			 */
			if (st->st_xprt == NULL)
				LIST_REMOVE(st, st_ilink);

			/*
			 * Reduce worker thread count when idle.
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster &&
				    (grp->sg_threadcount >
				    grp->sg_minthreads) &&
				    !st->st_xprt)
					break;
			} else if (error != 0) {
				KASSERT(error == EINTR || error == ERESTART,
				    ("non-signal error %d", error));
				/*
				 * Interrupted by a signal: if the
				 * process is stopping, suspend and
				 * continue; otherwise shut the pool
				 * down and exit this thread.
				 */
				mtx_unlock(&grp->sg_lock);
				p = curproc;
				PROC_LOCK(p);
				if (P_SHOULDSTOP(p) ||
				    (p->p_flag & P_TOTAL_STOP) != 0) {
					thread_suspend_check(0);
					PROC_UNLOCK(p);
					mtx_lock(&grp->sg_lock);
				} else {
					PROC_UNLOCK(p);
					svc_exit(pool);
					mtx_lock(&grp->sg_lock);
					break;
				}
			}
			continue;
		}
		mtx_unlock(&grp->sg_lock);

		/*
		 * Drain the transport socket and queue up any RPCs.
*/
		xprt->xp_lastactive = time_uptime;
		do {
			if (!svc_request_space_available(pool))
				break;
			rqstp = NULL;
			stat = svc_getreq(xprt, &rqstp);
			if (rqstp) {
				svc_change_space_used(pool, rqstp->rq_size);
				/*
				 * See if the application has a preference
				 * for some other thread.
				 */
				if (pool->sp_assign) {
					/*
					 * sp_assign returns with
					 * stpref->st_lock held; we queue
					 * the request there and drop it.
					 */
					stpref = pool->sp_assign(st, rqstp);
					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);
					mtx_unlock(&stpref->st_lock);
					/*
					 * Request handed to another
					 * thread: keep draining.
					 */
					if (stpref != st)
						rqstp = NULL;
				} else {
					rqstp->rq_thread = st;
					STAILQ_INSERT_TAIL(&st->st_reqs,
					    rqstp, rq_link);
				}
			}
		} while (rqstp == NULL && stat == XPRT_MOREREQS &&
		    grp->sg_state != SVCPOOL_CLOSING);

		/*
		 * Move this transport to the end of the active list to
		 * ensure fairness when multiple transports are active.
		 * If this was the last queued request, svc_getreq will end
		 * up calling xprt_inactive to remove from the active list.
		 */
		mtx_lock(&grp->sg_lock);
		xprt->xp_thread = NULL;
		st->st_xprt = NULL;
		if (xprt->xp_active) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active,
				    xprt, xp_alink);
		}
		mtx_unlock(&grp->sg_lock);
		SVC_RELEASE(xprt);

		/*
		 * Execute what we have queued.
*/
		mtx_lock(&st->st_lock);
		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
			STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
			mtx_unlock(&st->st_lock);
			/* Save rq_size: svc_executereq consumes rqstp. */
			sz = (long)rqstp->rq_size;
			svc_executereq(rqstp);
			svc_change_space_used(pool, -sz);
			mtx_lock(&st->st_lock);
		}
		mtx_unlock(&st->st_lock);
		mtx_lock(&grp->sg_lock);
	}

	/* Thread exit: drop any transport still assigned to us. */
	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}
	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	mtx_destroy(&st->st_lock);
	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	grp->sg_threadcount--;
	/* Wake svc_run, which waits for worker threads to finish. */
	if (!ismaster)
		wakeup(grp);
	mtx_unlock(&grp->sg_lock);
}

/*
 * Entry point for worker kthreads created by svc_new_thread.
 */
static void
svc_thread_start(void *arg)
{

	svc_run_internal((SVCGROUP *) arg, FALSE);
	kthread_exit();
}

/*
 * Spawn one additional worker kthread for the given group, bumping the
 * group's thread count before the thread starts.
 */
static void
svc_new_thread(SVCGROUP *grp)
{
	SVCPOOL *pool = grp->sg_pool;
	struct thread *td;

	mtx_lock(&grp->sg_lock);
	grp->sg_threadcount++;
	mtx_unlock(&grp->sg_lock);
	kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}

/*
 * Run an RPC service pool: size the thread groups, start the minimum
 * worker threads, run the master loop in the calling thread and then
 * wait for all workers to exit.
 */
void
svc_run(SVCPOOL *pool)
{
	int g, i;
	struct proc *p;
	struct thread *td;
	SVCGROUP *grp;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;

	/* Choose group count based on number of threads and CPUs. */
	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		grp->sg_minthreads = max(1,
		    pool->sp_minthreads / pool->sp_groupcount);
		grp->sg_maxthreads = max(1,
		    pool->sp_maxthreads / pool->sp_groupcount);
		grp->sg_lastcreatetime = time_uptime;
	}

	/* Starting threads; this thread counts as group 0's first. */
	pool->sp_groups[0].sg_threadcount++;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
			svc_new_thread(grp);
	}
	svc_run_internal(&pool->sp_groups[0], TRUE);

	/* Waiting for threads to stop.
*/
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while (grp->sg_threadcount > 0)
			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
		mtx_unlock(&grp->sg_lock);
	}
}

/*
 * Ask every group in the pool to shut down and wake all idle threads
 * so they observe SVCPOOL_CLOSING and exit.
 */
void
svc_exit(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCTHREAD *st;
	int g;

	pool->sp_state = SVCPOOL_CLOSING;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		if (grp->sg_state != SVCPOOL_CLOSING) {
			grp->sg_state = SVCPOOL_CLOSING;
			LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		mtx_unlock(&grp->sg_lock);
	}
}

/*
 * Decode a request's argument mbuf chain with xargs.  Takes ownership
 * of rqstp->rq_args (cleared here); the chain is consumed by the
 * mbuf-backed XDR stream.
 */
bool_t
svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	struct mbuf *m;
	XDR xdrs;
	bool_t stat;

	m = rqstp->rq_args;
	rqstp->rq_args = NULL;

	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	stat = xargs(&xdrs, args);
	XDR_DESTROY(&xdrs);

	return (stat);
}

/*
 * Free storage allocated by svc_getargs by running xargs in XDR_FREE
 * mode; also releases the request's cached caller address, if any.
 */
bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	XDR xdrs;

	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	xdrs.x_op = XDR_FREE;
	return (xargs(&xdrs, args));
}

/*
 * Release a request and everything it owns: notify the pool's
 * completion hook, drop the auth handle and transport references, and
 * free the address, argument mbufs and the request itself.
 */
void
svc_freereq(struct svc_req *rqstp)
{
	SVCTHREAD *st;
	SVCPOOL *pool;

	st = rqstp->rq_thread;
	if (st) {
		pool = st->st_pool;
		if (pool->sp_done)
			pool->sp_done(st, rqstp);
	}

	if (rqstp->rq_auth.svc_ah_ops)
		SVCAUTH_RELEASE(&rqstp->rq_auth);

	if (rqstp->rq_xprt) {
		SVC_RELEASE(rqstp->rq_xprt);
	}

	if (rqstp->rq_addr)
		free(rqstp->rq_addr, M_SONAME);

	if (rqstp->rq_args)
		m_freem(rqstp->rq_args);

	free(rqstp, M_RPC);
}
diff --git a/sys/rpc/svc_auth.c b/sys/rpc/svc_auth.c
index 21149139b5a5..a5c9753c4fde 100644
--- a/sys/rpc/svc_auth.c
+++ b/sys/rpc/svc_auth.c
@@ -1,227 +1,238 @@
/*	$NetBSD: svc_auth.c,v 1.12 2000/07/06 03:10:35 christos Exp $	*/
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1986-1991 by Sun Microsystems Inc. */ #if defined(LIBC_SCCS) && !defined(lint) #ident "@(#)svc_auth.c 1.16 94/04/24 SMI" static char sccsid[] = "@(#)svc_auth.c 1.26 89/02/07 Copyr 1984 Sun Micro"; #endif #include __FBSDID("$FreeBSD$"); /* * svc_auth.c, Server-side rpc authenticator interface. 
* */

/* NOTE(review): header names were lost in extraction ("#include" with no
 * argument); restore from the original svc_auth.c before compiling. */
#include
#include
#include
+#include
#include
#include
#include
#include

/*
 * Hooks registered by the RPCSEC_GSS module via svc_auth_reg(); NULL
 * until that module is loaded.
 */
static enum auth_stat (*_svcauth_rpcsec_gss)(struct svc_req *,
    struct rpc_msg *) = NULL;
static int (*_svcauth_rpcsec_gss_getcred)(struct svc_req *,
    struct ucred **, int *);

static const struct svc_auth_ops svc_auth_null_ops;

/*
 * The call rpc message, msg has been obtained from the wire.  The msg contains
 * the raw form of credentials and verifiers.  authenticate returns AUTH_OK
 * if the msg is successfully authenticated.  If AUTH_OK then the routine also
 * does the following things:
 * set rqst->rq_xprt->verf to the appropriate response verifier;
 * sets rqst->rq_client_cred to the "cooked" form of the credentials.
 *
 * NB: rqst->rq_cxprt->verf must be pre-allocated;
 * its length is set appropriately.
 *
 * The caller still owns and is responsible for msg->u.cmb.cred and
 * msg->u.cmb.verf.  The authentication system retains ownership of
 * rqst->rq_client_cred, the cooked credentials.
 *
 * There is an assumption that any flavour less than AUTH_NULL is
 * invalid.
*/
enum auth_stat
_authenticate(struct svc_req *rqst, struct rpc_msg *msg)
{
	int cred_flavor;
	enum auth_stat dummy;

	rqst->rq_cred = msg->rm_call.cb_cred;
	/* Default to the pass-through auth ops until a flavor overrides. */
	rqst->rq_auth.svc_ah_ops = &svc_auth_null_ops;
	rqst->rq_auth.svc_ah_private = NULL;
	cred_flavor = rqst->rq_cred.oa_flavor;
	switch (cred_flavor) {
	case AUTH_NULL:
		dummy = _svcauth_null(rqst, msg);
		return (dummy);
	case AUTH_SYS:
		/*
		 * Non-TLS flavors are rejected while plain (non-TLS) use
		 * of this transport is disabled.
		 */
		if ((rqst->rq_xprt->xp_tls & RPCTLS_FLAGS_DISABLED) != 0)
			return (AUTH_REJECTEDCRED);
		dummy = _svcauth_unix(rqst, msg);
		return (dummy);
	case AUTH_SHORT:
		if ((rqst->rq_xprt->xp_tls & RPCTLS_FLAGS_DISABLED) != 0)
			return (AUTH_REJECTEDCRED);
		dummy = _svcauth_short(rqst, msg);
		return (dummy);
	case RPCSEC_GSS:
		if ((rqst->rq_xprt->xp_tls & RPCTLS_FLAGS_DISABLED) != 0)
			return (AUTH_REJECTEDCRED);
		/* Rejected unless the RPCSEC_GSS module has registered. */
		if (!_svcauth_rpcsec_gss)
			return (AUTH_REJECTEDCRED);
		dummy = _svcauth_rpcsec_gss(rqst, msg);
		return (dummy);
	case AUTH_TLS:
		/*
		 * Note: AUTH_TLS is not subject to the DISABLED check
		 * above — it is how a client initiates TLS.
		 */
		dummy = _svcauth_rpcsec_tls(rqst, msg);
		return (dummy);
	default:
		break;
	}

	return (AUTH_REJECTEDCRED);
}

/*
 * A set of null auth methods used by any authentication protocols
 * that don't need to inspect or modify the message body.
*/
/* No-op wrap: leave the outgoing message body untouched. */
static bool_t
svcauth_null_wrap(SVCAUTH *auth, struct mbuf **mp)
{

	return (TRUE);
}

/* No-op unwrap: leave the incoming message body untouched. */
static bool_t
svcauth_null_unwrap(SVCAUTH *auth, struct mbuf **mp)
{

	return (TRUE);
}

/* Nothing to release for the null auth handle. */
static void
svcauth_null_release(SVCAUTH *auth)
{

}

static const struct svc_auth_ops svc_auth_null_ops = {
	.svc_ah_wrap = svcauth_null_wrap,
	.svc_ah_unwrap = svcauth_null_unwrap,
	.svc_ah_release = svcauth_null_release,
};

/*
 * AUTH_NULL flavor: always succeeds with a null response verifier.
 */
/*ARGSUSED*/
enum auth_stat
_svcauth_null(struct svc_req *rqst, struct rpc_msg *msg)
{

	rqst->rq_verf = _null_auth;
	return (AUTH_OK);
}

/*
 * Register an authentication flavor handler.  Only RPCSEC_GSS is
 * actually recorded here; other flavors are ignored (TRUE is returned
 * regardless).
 */
int
svc_auth_reg(int flavor,
    enum auth_stat (*svcauth)(struct svc_req *, struct rpc_msg *),
    int (*getcred)(struct svc_req *, struct ucred **, int *))
{

	if (flavor == RPCSEC_GSS) {
		_svcauth_rpcsec_gss = svcauth;
		_svcauth_rpcsec_gss_getcred = getcred;
	}
	return (TRUE);
}

/*
 * Build a kernel ucred for the caller of rqst.  On success *crp is a
 * new reference the caller must release and *flavorp (if non-NULL) is
 * the credential flavor; returns FALSE for flavors with no credential
 * mapping.
 */
int
svc_getcred(struct svc_req *rqst, struct ucred **crp, int *flavorp)
{
	struct ucred *cr = NULL;
	int flavor;
	struct xucred *xcr;
	SVCXPRT *xprt = rqst->rq_xprt;

	flavor = rqst->rq_cred.oa_flavor;
	if (flavorp)
		*flavorp = flavor;

	/*
	 * If there are credentials acquired via a TLS
	 * certificate for this TCP connection, use those
	 * instead of what is in the RPC header.
*/
	if ((xprt->xp_tls & (RPCTLS_FLAGS_CERTUSER |
	    RPCTLS_FLAGS_DISABLED)) == RPCTLS_FLAGS_CERTUSER &&
	    flavor == AUTH_UNIX) {
		/* Credentials taken from the client's TLS certificate. */
		cr = crget();
		cr->cr_uid = cr->cr_ruid = cr->cr_svuid = xprt->xp_uid;
		crsetgroups(cr, xprt->xp_ngrps, xprt->xp_gidp);
		cr->cr_rgid = cr->cr_svgid = xprt->xp_gidp[0];
-		cr->cr_prison = &prison0;
+#ifdef VNET_NFSD
+		if (jailed(curthread->td_ucred))
+			cr->cr_prison = curthread->td_ucred->cr_prison;
+		else
+#endif
+			cr->cr_prison = &prison0;
		prison_hold(cr->cr_prison);
		*crp = cr;
		return (TRUE);
	}

	switch (flavor) {
	case AUTH_UNIX:
		/* Copy the cooked AUTH_UNIX credential into a ucred. */
		xcr = (struct xucred *) rqst->rq_clntcred;
		cr = crget();
		cr->cr_uid = cr->cr_ruid = cr->cr_svuid = xcr->cr_uid;
		crsetgroups(cr, xcr->cr_ngroups, xcr->cr_groups);
		cr->cr_rgid = cr->cr_svgid = cr->cr_groups[0];
-		cr->cr_prison = &prison0;
+#ifdef VNET_NFSD
+		if (jailed(curthread->td_ucred))
+			cr->cr_prison = curthread->td_ucred->cr_prison;
+		else
+#endif
+			cr->cr_prison = &prison0;
		prison_hold(cr->cr_prison);
		*crp = cr;
		return (TRUE);
	case RPCSEC_GSS:
		/* Delegated to the RPCSEC_GSS module, if loaded. */
		if (!_svcauth_rpcsec_gss_getcred)
			return (FALSE);
		return (_svcauth_rpcsec_gss_getcred(rqst, crp, flavorp));
	default:
		return (FALSE);
	}
}
diff --git a/sys/rpc/svc_dg.c b/sys/rpc/svc_dg.c
index 4ab86bb5904a..bf3cbd2a6137 100644
--- a/sys/rpc/svc_dg.c
+++ b/sys/rpc/svc_dg.c
@@ -1,307 +1,311 @@
/*	$NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $	*/
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
* - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1986-1991 by Sun Microsystems Inc. */ #if defined(LIBC_SCCS) && !defined(lint) #ident "@(#)svc_dg.c 1.17 94/04/24 SMI" #endif #include __FBSDID("$FreeBSD$"); /* * svc_dg.c, Server side for connectionless RPC. 
*/

/* NOTE(review): header names were lost in extraction ("#include" with no
 * argument); restore from the original svc_dg.c before compiling. */
#include
+#include
#include
#include
#include
#include
+#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* Transport method implementations for connectionless (datagram) RPC. */
static enum xprt_stat svc_dg_stat(SVCXPRT *);
static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *,
    struct sockaddr *, struct mbuf *, uint32_t *);
static void svc_dg_destroy(SVCXPRT *);
static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
static int svc_dg_soupcall(struct socket *so, void *arg, int waitflag);

static const struct xp_ops svc_dg_ops = {
	.xp_recv =	svc_dg_recv,
	.xp_stat =	svc_dg_stat,
	.xp_reply =	svc_dg_reply,
	.xp_destroy =	svc_dg_destroy,
	.xp_control =	svc_dg_control,
};

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless specific initializations.
 * Once *xprt is initialized, it is registered.
 * see (svc.h, xprt_register).  If recvsize or sendsize are 0 suitable
 * system defaults are chosen.
 * The routines returns NULL if a problem occurred.
*/
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = "transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt;
	struct __rpc_sockinfo si;
	struct sockaddr* sa;
	int error;

+	/* Datagram transports are not supported from within a jail. */
+	if (jailed(curthread->td_ucred))
+		return (NULL);
	if (!__rpc_socket2sockinfo(so, &si)) {
		printf(svc_dg_str, svc_dg_err1);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		printf(svc_dg_str, svc_dg_err2);
		return (NULL);
	}

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_dg_ops;

	/* Record the socket's local address in xp_ltaddr. */
	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error)
		goto freedata;
	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	/* Arm the receive upcall so new datagrams activate the xprt. */
	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, svc_dg_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);
freedata:
	(void) printf(svc_dg_str, __no_mem_str);
	svc_xprt_free(xprt);
	return (NULL);
}

/*
 * A datagram transport has more requests exactly when its socket is
 * readable; it is otherwise idle.
 */
/*ARGSUSED*/
static enum xprt_stat
svc_dg_stat(SVCXPRT *xprt)
{

	if (soreadable(xprt->xp_socket))
		return (XPRT_MOREREQS);

	return (XPRT_IDLE);
}

/*
 * Receive one datagram, decode the RPC call header into msg and return
 * the sender's address and the remaining argument mbufs to the caller.
 */
static bool_t
svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct uio uio;
	struct sockaddr *raddr;
	struct mbuf *mreq;
	XDR xdrs;
	int error, rcvflag;

	/*
	 * Serialise access to the socket.
	 */
	sx_xlock(&xprt->xp_lock);

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here.  We attempt to read a
	 * packet from the socket and process it.
If the read fails,
	 * we have drained all pending requests so we call
	 * xprt_inactive().
	 */
	/* Effectively unlimited read size; one datagram per soreceive. */
	uio.uio_resid = 1000000000;
	uio.uio_td = curthread;
	mreq = NULL;
	rcvflag = MSG_DONTWAIT;
	error = soreceive(xprt->xp_socket, &raddr, &uio, &mreq, NULL,
	    &rcvflag);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for readability after taking the
		 * lock to protect us in the case where a new packet
		 * arrives on the socket after our call to soreceive
		 * fails with EWOULDBLOCK.  The pool lock protects us
		 * from racing the upcall after our soreadable() call
		 * returns false.
		 */
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		if (!soreadable(xprt->xp_socket))
			xprt_inactive_self(xprt);
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		/* Hard error: disarm the upcall and deactivate. */
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		soupcall_clear(xprt->xp_socket, SO_RCV);
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive_self(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	/* Decode the RPC call header from the received mbuf chain. */
	xdrmbuf_create(&xdrs, mreq, XDR_DECODE);
	if (!
xdr_callmsg(&xdrs, msg)) {
		XDR_DESTROY(&xdrs);
		return (FALSE);
	}

	*addrp = raddr;
	/* Hand the undecoded remainder (the arguments) to the caller. */
	*mp = xdrmbuf_getall(&xdrs);
	XDR_DESTROY(&xdrs);

	return (TRUE);
}

/*
 * Encode the reply message (appending the result mbufs m on success)
 * and send it as a single datagram to addr.
 */
static bool_t
svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error;

	mrep = m_gethdr(M_WAITOK, MT_DATA);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		/* sosend consumes mrep on both success and failure. */
		m_fixhdr(mrep);
		error = sosend(xprt->xp_socket, addr, NULL, mrep, NULL,
		    0, curthread);
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);
	xprt->xp_p2 = NULL;

	return (stat);
}

/*
 * Tear down a datagram transport: disarm the upcall, close the socket
 * and free the transport structure.
 */
static void
svc_dg_destroy(SVCXPRT *xprt)
{

	SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
	soupcall_clear(xprt->xp_socket, SO_RCV);
	SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);

	sx_destroy(&xprt->xp_lock);
	if (xprt->xp_socket)
		(void)soclose(xprt->xp_socket);

	if (xprt->xp_netid)
		(void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	svc_xprt_free(xprt);
}

/*
 * No control operations are supported for datagram transports
 * (K&R-style definition retained from the original sources).
 */
static bool_t
/*ARGSUSED*/
svc_dg_control(xprt, rq, in)
	SVCXPRT *xprt;
	const u_int rq;
	void *in;
{

	return (FALSE);
}

/*
 * Socket receive upcall: mark the transport active so a service
 * thread will pick it up.
 */
static int
svc_dg_soupcall(struct socket *so, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	xprt_active(xprt);
	return (SU_OK);
}