diff --git a/sys/rpc/rpcsec_gss/rpcsec_gss.c b/sys/rpc/rpcsec_gss/rpcsec_gss.c index 53770d139c61..89d1c56f7cc2 100644 --- a/sys/rpc/rpcsec_gss/rpcsec_gss.c +++ b/sys/rpc/rpcsec_gss/rpcsec_gss.c @@ -1,1215 +1,1215 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2008 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* auth_gss.c RPCSEC_GSS client routines. Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
$Id: auth_gss.c,v 1.32 2002/01/15 15:43:00 andros Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rpcsec_gss_int.h" static void rpc_gss_nextverf(AUTH*); static bool_t rpc_gss_marshal(AUTH *, uint32_t, XDR *, struct mbuf *); static bool_t rpc_gss_init(AUTH *auth, rpc_gss_options_ret_t *options_ret); static bool_t rpc_gss_refresh(AUTH *, void *); static bool_t rpc_gss_validate(AUTH *, uint32_t, struct opaque_auth *, struct mbuf **); static void rpc_gss_destroy(AUTH *); static void rpc_gss_destroy_context(AUTH *, bool_t); static const struct auth_ops rpc_gss_ops = { .ah_nextverf = rpc_gss_nextverf, .ah_marshal = rpc_gss_marshal, .ah_validate = rpc_gss_validate, .ah_refresh = rpc_gss_refresh, .ah_destroy = rpc_gss_destroy, }; enum rpcsec_gss_state { RPCSEC_GSS_START, RPCSEC_GSS_CONTEXT, RPCSEC_GSS_ESTABLISHED, RPCSEC_GSS_DESTROYING }; struct rpc_pending_request { uint32_t pr_xid; /* XID of rpc */ uint32_t pr_seq; /* matching GSS seq */ LIST_ENTRY(rpc_pending_request) pr_link; }; LIST_HEAD(rpc_pending_request_list, rpc_pending_request); struct rpc_gss_data { volatile u_int gd_refs; /* number of current users */ struct mtx gd_lock; uint32_t gd_hash; AUTH *gd_auth; /* link back to AUTH */ struct ucred *gd_ucred; /* matching local cred */ char *gd_principal; /* server principal name */ char *gd_clntprincipal; /* client principal name */ rpc_gss_options_req_t gd_options; /* GSS context options */ enum rpcsec_gss_state gd_state; /* connection state */ gss_buffer_desc gd_verf; /* save GSS_S_COMPLETE * NULL RPC verfier to * process at end of * context negotiation */ CLIENT *gd_clnt; /* client handle */ gss_OID gd_mech; /* mechanism to use */ gss_qop_t gd_qop; /* quality of protection */ gss_ctx_id_t gd_ctx; /* context id */ struct rpc_gss_cred gd_cred; /* client credentials */ uint32_t gd_seq; /* next sequence number */ u_int gd_win; /* sequence window */ struct rpc_pending_request_list gd_reqs; TAILQ_ENTRY(rpc_gss_data) gd_link; TAILQ_ENTRY(rpc_gss_data) gd_alllink; }; TAILQ_HEAD(rpc_gss_data_list, rpc_gss_data); #define AUTH_PRIVATE(auth) ((struct rpc_gss_data *)auth->ah_private) static struct timeval AUTH_TIMEOUT = { 25, 0 }; #define RPC_GSS_HASH_SIZE 11 #define RPC_GSS_MAX 256 static struct rpc_gss_data_list rpc_gss_cache[RPC_GSS_HASH_SIZE]; static struct rpc_gss_data_list rpc_gss_all; static struct sx rpc_gss_lock; static int rpc_gss_count; static AUTH *rpc_gss_seccreate_int(CLIENT *, struct ucred *, const char *, const char *, gss_OID, rpc_gss_service_t, u_int, rpc_gss_options_req_t *, rpc_gss_options_ret_t *); static void rpc_gss_hashinit(void *dummy) { int i; for (i = 0; i < RPC_GSS_HASH_SIZE; i++) TAILQ_INIT(&rpc_gss_cache[i]); TAILQ_INIT(&rpc_gss_all); sx_init(&rpc_gss_lock, "rpc_gss_lock"); } SYSINIT(rpc_gss_hashinit, SI_SUB_KMEM, SI_ORDER_ANY, rpc_gss_hashinit, NULL); static uint32_t rpc_gss_hash(const char *principal, gss_OID mech, struct ucred *cred, rpc_gss_service_t service) { uint32_t h; h = HASHSTEP(HASHINIT, cred->cr_uid); h = hash32_str(principal, h); h = hash32_buf(mech->elements, mech->length, h); h = HASHSTEP(h, (int) service); return (h % RPC_GSS_HASH_SIZE); } /* * Simplified interface to create a security association for the * current thread's * ucred. 
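For orientation, a hedged sketch of how an in-kernel consumer might use this interface; the principal string and the surrounding error handling are illustrative assumptions, not taken from this file.

static AUTH *
example_get_auth(CLIENT *clnt, struct ucred *cred)
{
	gss_OID mech_oid;

	/* "kerberosv5" is the mechanism name this file also uses. */
	if (!rpc_gss_mech_to_oid("kerberosv5", &mech_oid))
		return (NULL);
	/* "nfs@server.example.com" is a made-up host-based principal. */
	return (rpc_gss_secfind(clnt, cred, "nfs@server.example.com",
	    mech_oid, rpc_gss_svc_integrity));
}

The returned AUTH is shared through the cache below; the caller drops its reference with AUTH_DESTROY() when the association is no longer needed.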
*/ AUTH * rpc_gss_secfind(CLIENT *clnt, struct ucred *cred, const char *principal, gss_OID mech_oid, rpc_gss_service_t service) { uint32_t h, th; AUTH *auth; struct rpc_gss_data *gd, *tgd; rpc_gss_options_ret_t options; if (rpc_gss_count > RPC_GSS_MAX) { while (rpc_gss_count > RPC_GSS_MAX) { sx_xlock(&rpc_gss_lock); tgd = TAILQ_FIRST(&rpc_gss_all); th = tgd->gd_hash; TAILQ_REMOVE(&rpc_gss_cache[th], tgd, gd_link); TAILQ_REMOVE(&rpc_gss_all, tgd, gd_alllink); rpc_gss_count--; sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(tgd->gd_auth); } } /* * See if we already have an AUTH which matches. */ h = rpc_gss_hash(principal, mech_oid, cred, service); again: sx_slock(&rpc_gss_lock); TAILQ_FOREACH(gd, &rpc_gss_cache[h], gd_link) { if (gd->gd_ucred->cr_uid == cred->cr_uid && !strcmp(gd->gd_principal, principal) && gd->gd_mech == mech_oid && gd->gd_cred.gc_svc == service) { refcount_acquire(&gd->gd_refs); if (sx_try_upgrade(&rpc_gss_lock)) { /* * Keep rpc_gss_all LRU sorted. */ TAILQ_REMOVE(&rpc_gss_all, gd, gd_alllink); TAILQ_INSERT_TAIL(&rpc_gss_all, gd, gd_alllink); sx_xunlock(&rpc_gss_lock); } else { sx_sunlock(&rpc_gss_lock); } /* * If the state != ESTABLISHED, try and initialize * the authenticator again. This will happen if the * user's credentials have expired. It may succeed now, * if they have done a kinit or similar. */ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { memset(&options, 0, sizeof (options)); (void) rpc_gss_init(gd->gd_auth, &options); } return (gd->gd_auth); } } sx_sunlock(&rpc_gss_lock); /* * We missed in the cache - create a new association. */ auth = rpc_gss_seccreate_int(clnt, cred, NULL, principal, mech_oid, service, GSS_C_QOP_DEFAULT, NULL, NULL); if (!auth) return (NULL); gd = AUTH_PRIVATE(auth); gd->gd_hash = h; sx_xlock(&rpc_gss_lock); TAILQ_FOREACH(tgd, &rpc_gss_cache[h], gd_link) { if (tgd->gd_ucred->cr_uid == cred->cr_uid && !strcmp(tgd->gd_principal, principal) && tgd->gd_mech == mech_oid && tgd->gd_cred.gc_svc == service) { /* * We lost a race to create the AUTH that * matches this cred. */ sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(auth); goto again; } } rpc_gss_count++; TAILQ_INSERT_TAIL(&rpc_gss_cache[h], gd, gd_link); TAILQ_INSERT_TAIL(&rpc_gss_all, gd, gd_alllink); refcount_acquire(&gd->gd_refs); /* one for the cache, one for user */ sx_xunlock(&rpc_gss_lock); return (auth); } void rpc_gss_secpurge(CLIENT *clnt) { uint32_t h; struct rpc_gss_data *gd, *tgd; TAILQ_FOREACH_SAFE(gd, &rpc_gss_all, gd_alllink, tgd) { if (gd->gd_clnt == clnt) { sx_xlock(&rpc_gss_lock); h = gd->gd_hash; TAILQ_REMOVE(&rpc_gss_cache[h], gd, gd_link); TAILQ_REMOVE(&rpc_gss_all, gd, gd_alllink); rpc_gss_count--; sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(gd->gd_auth); } } } AUTH * rpc_gss_seccreate(CLIENT *clnt, struct ucred *cred, const char *clnt_principal, const char *principal, const char *mechanism, rpc_gss_service_t service, const char *qop, rpc_gss_options_req_t *options_req, rpc_gss_options_ret_t *options_ret) { gss_OID oid; u_int qop_num; /* * Bail out now if we don't know this mechanism. */ if (!rpc_gss_mech_to_oid(mechanism, &oid)) return (NULL); if (qop) { if (!rpc_gss_qop_to_num(qop, mechanism, &qop_num)) return (NULL); } else { qop_num = GSS_C_QOP_DEFAULT; } return (rpc_gss_seccreate_int(clnt, cred, clnt_principal, principal, oid, service, qop_num, options_req, options_ret)); } void rpc_gss_refresh_auth(AUTH *auth) { struct rpc_gss_data *gd; rpc_gss_options_ret_t options; gd = AUTH_PRIVATE(auth); /* * If the state != ESTABLISHED, try and initialize * the authenticator again. 
This will happen if the * user's credentials have expired. It may succeed now, * if they have done a kinit or similar. */ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { memset(&options, 0, sizeof (options)); (void) rpc_gss_init(auth, &options); } } static AUTH * rpc_gss_seccreate_int(CLIENT *clnt, struct ucred *cred, const char *clnt_principal, const char *principal, gss_OID mech_oid, rpc_gss_service_t service, u_int qop_num, rpc_gss_options_req_t *options_req, rpc_gss_options_ret_t *options_ret) { AUTH *auth; rpc_gss_options_ret_t options; struct rpc_gss_data *gd; /* * If the caller doesn't want the options, point at local * storage to simplify the code below. */ if (!options_ret) options_ret = &options; /* * Default service is integrity. */ if (service == rpc_gss_svc_default) service = rpc_gss_svc_integrity; memset(options_ret, 0, sizeof(*options_ret)); rpc_gss_log_debug("in rpc_gss_seccreate()"); memset(&rpc_createerr, 0, sizeof(rpc_createerr)); auth = mem_alloc(sizeof(*auth)); if (auth == NULL) { rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = ENOMEM; return (NULL); } gd = mem_alloc(sizeof(*gd)); if (gd == NULL) { rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = ENOMEM; mem_free(auth, sizeof(*auth)); return (NULL); } auth->ah_ops = &rpc_gss_ops; auth->ah_private = (caddr_t) gd; auth->ah_cred.oa_flavor = RPCSEC_GSS; refcount_init(&gd->gd_refs, 1); mtx_init(&gd->gd_lock, "gd->gd_lock", NULL, MTX_DEF); gd->gd_auth = auth; gd->gd_ucred = crdup(cred); gd->gd_principal = strdup(principal, M_RPC); if (clnt_principal != NULL) gd->gd_clntprincipal = strdup(clnt_principal, M_RPC); else gd->gd_clntprincipal = NULL; if (options_req) { gd->gd_options = *options_req; } else { gd->gd_options.req_flags = GSS_C_MUTUAL_FLAG; gd->gd_options.time_req = 0; gd->gd_options.my_cred = GSS_C_NO_CREDENTIAL; gd->gd_options.input_channel_bindings = NULL; } CLNT_ACQUIRE(clnt); gd->gd_clnt = clnt; gd->gd_ctx = GSS_C_NO_CONTEXT; gd->gd_mech = mech_oid; gd->gd_qop = qop_num; gd->gd_cred.gc_version = RPCSEC_GSS_VERSION; gd->gd_cred.gc_proc = RPCSEC_GSS_INIT; gd->gd_cred.gc_seq = 0; gd->gd_cred.gc_svc = service; LIST_INIT(&gd->gd_reqs); if (!rpc_gss_init(auth, options_ret)) { goto bad; } return (auth); bad: AUTH_DESTROY(auth); return (NULL); } bool_t rpc_gss_set_defaults(AUTH *auth, rpc_gss_service_t service, const char *qop) { struct rpc_gss_data *gd; u_int qop_num; const char *mechanism; gd = AUTH_PRIVATE(auth); if (!rpc_gss_oid_to_mech(gd->gd_mech, &mechanism)) { return (FALSE); } if (qop) { if (!rpc_gss_qop_to_num(qop, mechanism, &qop_num)) { return (FALSE); } } else { qop_num = GSS_C_QOP_DEFAULT; } gd->gd_cred.gc_svc = service; gd->gd_qop = qop_num; return (TRUE); } static void rpc_gss_purge_xid(struct rpc_gss_data *gd, uint32_t xid) { struct rpc_pending_request *pr, *npr; struct rpc_pending_request_list reqs; LIST_INIT(&reqs); mtx_lock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &gd->gd_reqs, pr_link, npr) { if (pr->pr_xid == xid) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&reqs, pr, pr_link); } } mtx_unlock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) { mem_free(pr, sizeof(*pr)); } } static uint32_t rpc_gss_alloc_seq(struct rpc_gss_data *gd) { uint32_t seq; mtx_lock(&gd->gd_lock); seq = gd->gd_seq; gd->gd_seq++; mtx_unlock(&gd->gd_lock); return (seq); } static void rpc_gss_nextverf(__unused AUTH *auth) { /* not used */ } static bool_t rpc_gss_marshal(AUTH *auth, uint32_t xid, XDR *xdrs, struct mbuf *args) { struct rpc_gss_data *gd; struct 
rpc_pending_request *pr; uint32_t seq; XDR tmpxdrs; struct rpc_gss_cred gsscred; char credbuf[MAX_AUTH_BYTES]; struct opaque_auth creds, verf; gss_buffer_desc rpcbuf, checksum; OM_uint32 maj_stat, min_stat; bool_t xdr_stat; rpc_gss_log_debug("in rpc_gss_marshal()"); gd = AUTH_PRIVATE(auth); gsscred = gd->gd_cred; seq = rpc_gss_alloc_seq(gd); gsscred.gc_seq = seq; xdrmem_create(&tmpxdrs, credbuf, sizeof(credbuf), XDR_ENCODE); if (!xdr_rpc_gss_cred(&tmpxdrs, &gsscred)) { XDR_DESTROY(&tmpxdrs); _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } creds.oa_flavor = RPCSEC_GSS; creds.oa_base = credbuf; creds.oa_length = XDR_GETPOS(&tmpxdrs); XDR_DESTROY(&tmpxdrs); xdr_opaque_auth(xdrs, &creds); if (gd->gd_cred.gc_proc == RPCSEC_GSS_INIT || gd->gd_cred.gc_proc == RPCSEC_GSS_CONTINUE_INIT) { if (!xdr_opaque_auth(xdrs, &_null_auth)) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } return (xdr_putmbuf(xdrs, args)); } else { /* * Keep track of this XID + seq pair so that we can do * the matching gss_verify_mic in AUTH_VALIDATE. */ pr = mem_alloc(sizeof(struct rpc_pending_request)); mtx_lock(&gd->gd_lock); pr->pr_xid = xid; pr->pr_seq = seq; LIST_INSERT_HEAD(&gd->gd_reqs, pr, pr_link); mtx_unlock(&gd->gd_lock); /* * Checksum serialized RPC header, up to and including * credential. For the in-kernel environment, we * assume that our XDR stream is on a contiguous * memory buffer (e.g. an mbuf). */ rpcbuf.length = XDR_GETPOS(xdrs); XDR_SETPOS(xdrs, 0); rpcbuf.value = XDR_INLINE(xdrs, rpcbuf.length); maj_stat = gss_get_mic(&min_stat, gd->gd_ctx, gd->gd_qop, &rpcbuf, &checksum); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_get_mic", gd->gd_mech, maj_stat, min_stat); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); } _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); return (FALSE); } verf.oa_flavor = RPCSEC_GSS; verf.oa_base = checksum.value; verf.oa_length = checksum.length; xdr_stat = xdr_opaque_auth(xdrs, &verf); gss_release_buffer(&min_stat, &checksum); if (!xdr_stat) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } if (gd->gd_state != RPCSEC_GSS_ESTABLISHED || gd->gd_cred.gc_svc == rpc_gss_svc_none) { return (xdr_putmbuf(xdrs, args)); } else { if (!xdr_rpc_gss_wrap_data(&args, gd->gd_ctx, gd->gd_qop, gd->gd_cred.gc_svc, seq)) return (FALSE); return (xdr_putmbuf(xdrs, args)); } } return (TRUE); } static bool_t rpc_gss_validate(AUTH *auth, uint32_t xid, struct opaque_auth *verf, struct mbuf **resultsp) { struct rpc_gss_data *gd; struct rpc_pending_request *pr, *npr; struct rpc_pending_request_list reqs; gss_qop_t qop_state; uint32_t num, seq; gss_buffer_desc signbuf, checksum; OM_uint32 maj_stat, min_stat; rpc_gss_log_debug("in rpc_gss_validate()"); gd = AUTH_PRIVATE(auth); /* * The client will call us with a NULL verf when it gives up * on an XID. */ if (!verf) { rpc_gss_purge_xid(gd, xid); return (TRUE); } if (gd->gd_state == RPCSEC_GSS_CONTEXT) { /* * Save the on the wire verifier to validate last INIT * phase packet after decode if the major status is * GSS_S_COMPLETE. 
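For reference alongside rpc_gss_marshal() above and rpc_gss_validate() here: the request verifier is a gss_get_mic() over the serialized RPC header up to and including the credential, and the reply verifier is a MIC over the request's sequence number in network byte order. A hedged, userland-style sketch of that reply check using only standard GSS-API calls (not this file's code):

#include <arpa/inet.h>		/* htonl() */
#include <stdint.h>
#include <gssapi/gssapi.h>

static int
reply_verf_ok(gss_ctx_id_t ctx, uint32_t seq, void *verf, size_t verflen,
    gss_qop_t expected_qop)
{
	gss_buffer_desc signbuf, checksum;
	OM_uint32 maj, min;
	gss_qop_t qop;
	uint32_t num = htonl(seq);

	signbuf.value = &num;
	signbuf.length = sizeof(num);
	checksum.value = verf;
	checksum.length = verflen;
	maj = gss_verify_mic(&min, ctx, &signbuf, &checksum, &qop);
	return (maj == GSS_S_COMPLETE && qop == expected_qop);
}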
*/ if (gd->gd_verf.value) xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); gd->gd_verf.value = mem_alloc(verf->oa_length); if (gd->gd_verf.value == NULL) { printf("gss_validate: out of memory\n"); _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); m_freem(*resultsp); *resultsp = NULL; return (FALSE); } memcpy(gd->gd_verf.value, verf->oa_base, verf->oa_length); gd->gd_verf.length = verf->oa_length; return (TRUE); } /* * We need to check the verifier against all the requests * we've sent for this XID - for unreliable protocols, we * retransmit with the same XID but different sequence * number. We temporarily take this set of requests out of the * list so that we can work through the list without having to * hold the lock. */ mtx_lock(&gd->gd_lock); LIST_INIT(&reqs); LIST_FOREACH_SAFE(pr, &gd->gd_reqs, pr_link, npr) { if (pr->pr_xid == xid) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&reqs, pr, pr_link); } } mtx_unlock(&gd->gd_lock); LIST_FOREACH(pr, &reqs, pr_link) { if (pr->pr_xid == xid) { seq = pr->pr_seq; num = htonl(seq); signbuf.value = &num; signbuf.length = sizeof(num); checksum.value = verf->oa_base; checksum.length = verf->oa_length; maj_stat = gss_verify_mic(&min_stat, gd->gd_ctx, &signbuf, &checksum, &qop_state); if (maj_stat != GSS_S_COMPLETE || qop_state != gd->gd_qop) { continue; } if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); break; } //rpc_gss_purge_reqs(gd, seq); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) mem_free(pr, sizeof(*pr)); if (gd->gd_cred.gc_svc == rpc_gss_svc_none) { return (TRUE); } else { if (!xdr_rpc_gss_unwrap_data(resultsp, gd->gd_ctx, gd->gd_qop, gd->gd_cred.gc_svc, seq)) { return (FALSE); } } return (TRUE); } } /* * We didn't match - put back any entries for this XID so that * a future call to validate can retry. */ mtx_lock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&gd->gd_reqs, pr, pr_link); } mtx_unlock(&gd->gd_lock); /* * Nothing matches - give up. */ _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); m_freem(*resultsp); *resultsp = NULL; return (FALSE); } static bool_t rpc_gss_init(AUTH *auth, rpc_gss_options_ret_t *options_ret) { struct thread *td = curthread; struct ucred *crsave; struct rpc_gss_data *gd; struct rpc_gss_init_res gr; gss_buffer_desc principal_desc; gss_buffer_desc *recv_tokenp, recv_token, send_token; gss_name_t name; OM_uint32 maj_stat, min_stat, call_stat; const char *mech; struct rpc_callextra ext; gss_OID mech_oid; gss_OID_set mechlist; static enum krb_imp my_krb_imp = KRBIMP_UNKNOWN; rpc_gss_log_debug("in rpc_gss_refresh()"); gd = AUTH_PRIVATE(auth); mtx_lock(&gd->gd_lock); /* * If the context isn't in START state, someone else is * refreshing - we wait till they are done. If they fail, they * will put the state back to START and we can try (most * likely to also fail). */ while (gd->gd_state != RPCSEC_GSS_START && gd->gd_state != RPCSEC_GSS_ESTABLISHED) { msleep(gd, &gd->gd_lock, 0, "gssstate", 0); } if (gd->gd_state == RPCSEC_GSS_ESTABLISHED) { mtx_unlock(&gd->gd_lock); return (TRUE); } gd->gd_state = RPCSEC_GSS_CONTEXT; mtx_unlock(&gd->gd_lock); gd->gd_cred.gc_proc = RPCSEC_GSS_INIT; gd->gd_cred.gc_seq = 0; /* * XXX Threads from inside jails can get here via calls * to clnt_vc_call()->AUTH_REFRESH()->rpc_gss_refresh() * but the NFS mount is always done outside of the * jails in vnet0.
Since the thread credentials won't * necessarily have cr_prison == vnet0 and this function * has no access to the socket, using vnet0 seems the * only option. This is broken if NFS mounts are enabled * within vnet prisons. */ KGSS_CURVNET_SET_QUIET(vnet0); /* * For KerberosV, if there is a client principal name, that implies * that this is a host based initiator credential in the default * keytab file. For this case, it is necessary to do a * gss_acquire_cred(). When this is done, the gssd daemon will * do the equivalent of "kinit -k" to put a TGT for the name in * the credential cache file for the gssd daemon. */ if (gd->gd_clntprincipal != NULL && rpc_gss_mech_to_oid("kerberosv5", &mech_oid) && gd->gd_mech == mech_oid) { /* Get rid of any old credential. */ if (gd->gd_options.my_cred != GSS_C_NO_CREDENTIAL) { gss_release_cred(&min_stat, &gd->gd_options.my_cred); gd->gd_options.my_cred = GSS_C_NO_CREDENTIAL; } /* * The mechanism must be set to KerberosV for acquisition * of credentials to work reliably. */ maj_stat = gss_create_empty_oid_set(&min_stat, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } maj_stat = gss_add_oid_set_member(&min_stat, gd->gd_mech, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; gss_release_oid_set(&min_stat, &mechlist); goto out; } principal_desc.value = (void *)gd->gd_clntprincipal; principal_desc.length = strlen(gd->gd_clntprincipal); maj_stat = gss_import_name(&min_stat, &principal_desc, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; gss_release_oid_set(&min_stat, &mechlist); goto out; } /* Acquire the credentials. */ maj_stat = gss_acquire_cred(&min_stat, name, 0, mechlist, GSS_C_INITIATE, &gd->gd_options.my_cred, NULL, NULL); gss_release_name(&min_stat, &name); gss_release_oid_set(&min_stat, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } } principal_desc.value = (void *)gd->gd_principal; principal_desc.length = strlen(gd->gd_principal); maj_stat = gss_import_name(&min_stat, &principal_desc, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } if (my_krb_imp == KRBIMP_UNKNOWN) { maj_stat = gss_supports_lucid(&min_stat, NULL); if (maj_stat == GSS_S_COMPLETE) my_krb_imp = KRBIMP_MIT; else - my_krb_imp = KRBIMP_HESIOD1; + my_krb_imp = KRBIMP_HEIMDALV1; } /* GSS context establishment loop. 
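The loop that follows is the usual GSS-API token-exchange pattern, with each token carried by an RPCSEC_GSS INIT/CONTINUE_INIT null-procedure call. A hedged userland sketch of the general shape, where send_to_server() and recv_from_server() are hypothetical stand-ins for that transport:

#include <gssapi/gssapi.h>

static void send_to_server(gss_buffer_t);	/* hypothetical transport */
static void recv_from_server(gss_buffer_t);	/* hypothetical transport */

static OM_uint32
establish_context(gss_name_t server, gss_ctx_id_t *ctx)
{
	gss_buffer_desc in = GSS_C_EMPTY_BUFFER, out = GSS_C_EMPTY_BUFFER;
	OM_uint32 maj, min;

	*ctx = GSS_C_NO_CONTEXT;
	do {
		maj = gss_init_sec_context(&min, GSS_C_NO_CREDENTIAL, ctx,
		    server, GSS_C_NO_OID, GSS_C_MUTUAL_FLAG, 0,
		    GSS_C_NO_CHANNEL_BINDINGS, &in, NULL, &out, NULL, NULL);
		if (out.length != 0) {
			send_to_server(&out);
			gss_release_buffer(&min, &out);
		}
		if (maj == GSS_S_CONTINUE_NEEDED)
			recv_from_server(&in);
	} while (maj == GSS_S_CONTINUE_NEEDED);
	return (maj);
}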
*/ memset(&recv_token, 0, sizeof(recv_token)); memset(&gr, 0, sizeof(gr)); memset(options_ret, 0, sizeof(*options_ret)); options_ret->major_status = GSS_S_FAILURE; recv_tokenp = GSS_C_NO_BUFFER; for (;;) { crsave = td->td_ucred; td->td_ucred = gd->gd_ucred; if (my_krb_imp == KRBIMP_MIT) maj_stat = gss_init_sec_context_lucid_v1(&min_stat, gd->gd_options.my_cred, &gd->gd_ctx, name, gd->gd_mech, gd->gd_options.req_flags, gd->gd_options.time_req, gd->gd_options.input_channel_bindings, recv_tokenp, &gd->gd_mech, /* used mech */ &send_token, &options_ret->ret_flags, &options_ret->time_req); else maj_stat = gss_init_sec_context(&min_stat, gd->gd_options.my_cred, &gd->gd_ctx, name, gd->gd_mech, gd->gd_options.req_flags, gd->gd_options.time_req, gd->gd_options.input_channel_bindings, recv_tokenp, &gd->gd_mech, /* used mech */ &send_token, &options_ret->ret_flags, &options_ret->time_req); td->td_ucred = crsave; /* * Free the token which we got from the server (if * any). Remember that this was allocated by XDR, not * GSS-API. */ if (recv_tokenp != GSS_C_NO_BUFFER) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_token); recv_tokenp = GSS_C_NO_BUFFER; } if (gd->gd_mech && rpc_gss_oid_to_mech(gd->gd_mech, &mech)) { strlcpy(options_ret->actual_mechanism, mech, sizeof(options_ret->actual_mechanism)); } if (maj_stat != GSS_S_COMPLETE && maj_stat != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("gss_init_sec_context", gd->gd_mech, maj_stat, min_stat); options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; break; } if (send_token.length != 0) { memset(&gr, 0, sizeof(gr)); bzero(&ext, sizeof(ext)); ext.rc_auth = auth; call_stat = CLNT_CALL_EXT(gd->gd_clnt, &ext, NULLPROC, (xdrproc_t)xdr_gss_buffer_desc, &send_token, (xdrproc_t)xdr_rpc_gss_init_res, (caddr_t)&gr, AUTH_TIMEOUT); gss_release_buffer(&min_stat, &send_token); if (call_stat != RPC_SUCCESS) break; if (gr.gr_major != GSS_S_COMPLETE && gr.gr_major != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("server reply", gd->gd_mech, gr.gr_major, gr.gr_minor); options_ret->major_status = gr.gr_major; options_ret->minor_status = gr.gr_minor; break; } /* * Save the server's gr_handle value, freeing * what we have already (remember that this * was allocated by XDR, not GSS-API). */ if (gr.gr_handle.length != 0) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_cred.gc_handle); gd->gd_cred.gc_handle = gr.gr_handle; } /* * Save the server's token as well. */ if (gr.gr_token.length != 0) { recv_token = gr.gr_token; recv_tokenp = &recv_token; } /* * Since we have copied out all the bits of gr * which XDR allocated for us, we don't need * to free it. 
*/ gd->gd_cred.gc_proc = RPCSEC_GSS_CONTINUE_INIT; } if (maj_stat == GSS_S_COMPLETE) { gss_buffer_desc bufin; u_int seq, qop_state = 0; /* * gss header verifier, * usually checked in gss_validate */ seq = htonl(gr.gr_win); bufin.value = (unsigned char *)&seq; bufin.length = sizeof(seq); maj_stat = gss_verify_mic(&min_stat, gd->gd_ctx, &bufin, &gd->gd_verf, &qop_state); if (maj_stat != GSS_S_COMPLETE || qop_state != gd->gd_qop) { rpc_gss_log_status("gss_verify_mic", gd->gd_mech, maj_stat, min_stat); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); } _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; break; } options_ret->major_status = GSS_S_COMPLETE; options_ret->minor_status = 0; options_ret->rpcsec_version = gd->gd_cred.gc_version; options_ret->gss_context = gd->gd_ctx; gd->gd_cred.gc_proc = RPCSEC_GSS_DATA; gd->gd_seq = 1; gd->gd_win = gr.gr_win; break; } } gss_release_name(&min_stat, &name); xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); out: /* End context negotiation loop. */ if (gd->gd_cred.gc_proc != RPCSEC_GSS_DATA) { rpc_createerr.cf_stat = RPC_AUTHERROR; _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); if (gd->gd_ctx) { gss_delete_sec_context(&min_stat, &gd->gd_ctx, GSS_C_NO_BUFFER); } KGSS_CURVNET_RESTORE(); mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_START; wakeup(gd); mtx_unlock(&gd->gd_lock); return (FALSE); } KGSS_CURVNET_RESTORE(); mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_ESTABLISHED; wakeup(gd); mtx_unlock(&gd->gd_lock); return (TRUE); } static bool_t rpc_gss_refresh(AUTH *auth, void *msg) { struct rpc_msg *reply = (struct rpc_msg *) msg; rpc_gss_options_ret_t options; struct rpc_gss_data *gd; gd = AUTH_PRIVATE(auth); /* * If the context is in DESTROYING state, then just return, since * there is no point in refreshing the credentials. */ mtx_lock(&gd->gd_lock); if (gd->gd_state == RPCSEC_GSS_DESTROYING) { mtx_unlock(&gd->gd_lock); return (FALSE); } mtx_unlock(&gd->gd_lock); /* * If the error was RPCSEC_GSS_CREDPROBLEM of * RPCSEC_GSS_CTXPROBLEM we start again from scratch. All * other errors are fatal. */ if (reply->rm_reply.rp_stat == MSG_DENIED && reply->rm_reply.rp_rjct.rj_stat == AUTH_ERROR && (reply->rm_reply.rp_rjct.rj_why == RPCSEC_GSS_CREDPROBLEM || reply->rm_reply.rp_rjct.rj_why == RPCSEC_GSS_CTXPROBLEM)) { rpc_gss_destroy_context(auth, FALSE); memset(&options, 0, sizeof(options)); return (rpc_gss_init(auth, &options)); } return (FALSE); } static void rpc_gss_destroy_context(AUTH *auth, bool_t send_destroy) { struct rpc_gss_data *gd; struct rpc_pending_request *pr; OM_uint32 min_stat; struct rpc_callextra ext; rpc_gss_log_debug("in rpc_gss_destroy_context()"); gd = AUTH_PRIVATE(auth); mtx_lock(&gd->gd_lock); /* * If the context isn't in ESTABISHED state, someone else is * destroying/refreshing - we wait till they are done. 
*/ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { while (gd->gd_state != RPCSEC_GSS_START && gd->gd_state != RPCSEC_GSS_ESTABLISHED) msleep(gd, &gd->gd_lock, 0, "gssstate", 0); mtx_unlock(&gd->gd_lock); return; } gd->gd_state = RPCSEC_GSS_DESTROYING; mtx_unlock(&gd->gd_lock); if (send_destroy) { gd->gd_cred.gc_proc = RPCSEC_GSS_DESTROY; bzero(&ext, sizeof(ext)); ext.rc_auth = auth; CLNT_CALL_EXT(gd->gd_clnt, &ext, NULLPROC, (xdrproc_t)xdr_void, NULL, (xdrproc_t)xdr_void, NULL, AUTH_TIMEOUT); } while ((pr = LIST_FIRST(&gd->gd_reqs)) != NULL) { LIST_REMOVE(pr, pr_link); mem_free(pr, sizeof(*pr)); } /* * Free the context token. Remember that this was * allocated by XDR, not GSS-API. */ xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_cred.gc_handle); gd->gd_cred.gc_handle.length = 0; if (gd->gd_ctx != GSS_C_NO_CONTEXT) gss_delete_sec_context(&min_stat, &gd->gd_ctx, NULL); mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_START; wakeup(gd); mtx_unlock(&gd->gd_lock); } static void rpc_gss_destroy(AUTH *auth) { struct rpc_gss_data *gd; rpc_gss_log_debug("in rpc_gss_destroy()"); gd = AUTH_PRIVATE(auth); if (!refcount_release(&gd->gd_refs)) return; rpc_gss_destroy_context(auth, TRUE); CLNT_RELEASE(gd->gd_clnt); crfree(gd->gd_ucred); free(gd->gd_principal, M_RPC); if (gd->gd_clntprincipal != NULL) free(gd->gd_clntprincipal, M_RPC); if (gd->gd_verf.value) xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); mtx_destroy(&gd->gd_lock); mem_free(gd, sizeof(*gd)); mem_free(auth, sizeof(*auth)); } int rpc_gss_max_data_length(AUTH *auth, int max_tp_unit_len) { struct rpc_gss_data *gd; int want_conf; OM_uint32 max; OM_uint32 maj_stat, min_stat; int result; gd = AUTH_PRIVATE(auth); switch (gd->gd_cred.gc_svc) { case rpc_gss_svc_none: return (max_tp_unit_len); break; case rpc_gss_svc_default: case rpc_gss_svc_integrity: want_conf = FALSE; break; case rpc_gss_svc_privacy: want_conf = TRUE; break; default: return (0); } maj_stat = gss_wrap_size_limit(&min_stat, gd->gd_ctx, want_conf, gd->gd_qop, max_tp_unit_len, &max); if (maj_stat == GSS_S_COMPLETE) { result = (int) max; if (result < 0) result = 0; return (result); } else { rpc_gss_log_status("gss_wrap_size_limit", gd->gd_mech, maj_stat, min_stat); return (0); } } diff --git a/sys/rpc/rpcsec_gss/rpcsec_gss_int.h b/sys/rpc/rpcsec_gss/rpcsec_gss_int.h index 02a7767220de..ba200ee3aeb7 100644 --- a/sys/rpc/rpcsec_gss/rpcsec_gss_int.h +++ b/sys/rpc/rpcsec_gss/rpcsec_gss_int.h @@ -1,101 +1,101 @@ /* rpcsec_gss.h SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. $Id: auth_gss.h,v 1.12 2001/04/30 19:44:47 andros Exp $ */ #ifndef _RPCSEC_GSS_INT_H #define _RPCSEC_GSS_INT_H #include /* RPCSEC_GSS control procedures. */ typedef enum { RPCSEC_GSS_DATA = 0, RPCSEC_GSS_INIT = 1, RPCSEC_GSS_CONTINUE_INIT = 2, RPCSEC_GSS_DESTROY = 3 } rpc_gss_proc_t; #define RPCSEC_GSS_VERSION 1 /* Credentials. */ struct rpc_gss_cred { u_int gc_version; /* version */ rpc_gss_proc_t gc_proc; /* control procedure */ u_int gc_seq; /* sequence number */ rpc_gss_service_t gc_svc; /* service */ gss_buffer_desc gc_handle; /* handle to server-side context */ }; /* Context creation response. */ struct rpc_gss_init_res { gss_buffer_desc gr_handle; /* handle to server-side context */ u_int gr_major; /* major status */ u_int gr_minor; /* minor status */ u_int gr_win; /* sequence window */ gss_buffer_desc gr_token; /* token */ }; /* Maximum sequence number value. */ #define MAXSEQ 0x80000000 enum krb_imp { KRBIMP_UNKNOWN, - KRBIMP_HESIOD1, + KRBIMP_HEIMDALV1, KRBIMP_MIT }; /* Prototypes. */ __BEGIN_DECLS bool_t xdr_rpc_gss_cred(XDR *xdrs, struct rpc_gss_cred *p); bool_t xdr_rpc_gss_init_res(XDR *xdrs, struct rpc_gss_init_res *p); bool_t xdr_rpc_gss_wrap_data(struct mbuf **argsp, gss_ctx_id_t ctx, gss_qop_t qop, rpc_gss_service_t svc, u_int seq); bool_t xdr_rpc_gss_unwrap_data(struct mbuf **resultsp, gss_ctx_id_t ctx, gss_qop_t qop, rpc_gss_service_t svc, u_int seq); const char *_rpc_gss_num_to_qop(const char *mech, u_int num); void _rpc_gss_set_error(int rpc_gss_error, int system_error); void rpc_gss_log_debug(const char *fmt, ...); void rpc_gss_log_status(const char *m, gss_OID mech, OM_uint32 major, OM_uint32 minor); __END_DECLS #endif /* !_RPCSEC_GSS_INT_H */ diff --git a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c index e047c557c712..35c904560836 100644 --- a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c +++ b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c @@ -1,1715 +1,1715 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 1990 The Regents of the University of California. * * Copyright (c) 2008 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* svc_rpcsec_gss.c Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
$Id: svc_auth_gss.c,v 1.27 2002/01/15 15:43:00 andros Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rpcsec_gss_int.h" static bool_t svc_rpc_gss_wrap(SVCAUTH *, struct mbuf **); static bool_t svc_rpc_gss_unwrap(SVCAUTH *, struct mbuf **); static void svc_rpc_gss_release(SVCAUTH *); static enum auth_stat svc_rpc_gss(struct svc_req *, struct rpc_msg *); static int rpc_gss_svc_getcred(struct svc_req *, struct ucred **, int *); static const struct svc_auth_ops svc_auth_gss_ops = { .svc_ah_wrap = svc_rpc_gss_wrap, .svc_ah_unwrap = svc_rpc_gss_unwrap, .svc_ah_release = svc_rpc_gss_release, }; struct sx svc_rpc_gss_lock; struct svc_rpc_gss_callback { SLIST_ENTRY(svc_rpc_gss_callback) cb_link; rpc_gss_callback_t cb_callback; }; SLIST_HEAD(svc_rpc_gss_callback_list, svc_rpc_gss_callback); KGSS_VNET_DEFINE_STATIC(struct svc_rpc_gss_callback_list, svc_rpc_gss_callbacks) = SLIST_HEAD_INITIALIZER(svc_rpc_gss_callbacks); struct svc_rpc_gss_svc_name { SLIST_ENTRY(svc_rpc_gss_svc_name) sn_link; char *sn_principal; gss_OID sn_mech; u_int sn_req_time; gss_cred_id_t sn_cred; u_int sn_program; u_int sn_version; }; SLIST_HEAD(svc_rpc_gss_svc_name_list, svc_rpc_gss_svc_name); KGSS_VNET_DEFINE_STATIC(struct svc_rpc_gss_svc_name_list, svc_rpc_gss_svc_names) = SLIST_HEAD_INITIALIZER(svc_rpc_gss_svc_names); enum svc_rpc_gss_client_state { CLIENT_NEW, /* still authenticating */ CLIENT_ESTABLISHED, /* context established */ CLIENT_STALE /* garbage to collect */ }; #define SVC_RPC_GSS_SEQWINDOW 128 struct svc_rpc_gss_clientid { unsigned long ci_hostid; uint32_t ci_boottime; uint32_t ci_id; }; struct svc_rpc_gss_client { TAILQ_ENTRY(svc_rpc_gss_client) cl_link; TAILQ_ENTRY(svc_rpc_gss_client) cl_alllink; volatile u_int cl_refs; struct sx cl_lock; struct svc_rpc_gss_clientid cl_id; time_t cl_expiration; /* when to gc */ enum svc_rpc_gss_client_state cl_state; /* client state */ bool_t cl_locked; /* fixed service+qop */ gss_ctx_id_t cl_ctx; /* context id */ gss_cred_id_t cl_creds; /* delegated creds */ gss_name_t cl_cname; /* client name */ struct svc_rpc_gss_svc_name *cl_sname; /* server name used */ rpc_gss_rawcred_t cl_rawcred; /* raw credentials */ rpc_gss_ucred_t cl_ucred; /* unix-style credentials */ struct ucred *cl_cred; /* kernel-style credentials */ int cl_rpcflavor; /* RPC pseudo sec flavor */ bool_t cl_done_callback; /* TRUE after call */ void *cl_cookie; /* user cookie from callback */ gid_t cl_gid_storage[NGROUPS]; gss_OID cl_mech; /* mechanism */ gss_qop_t cl_qop; /* quality of protection */ uint32_t cl_seqlast; /* sequence window origin */ uint32_t cl_seqmask[SVC_RPC_GSS_SEQWINDOW/32]; /* bitmask of seqnums */ }; TAILQ_HEAD(svc_rpc_gss_client_list, svc_rpc_gss_client); /* * This structure holds enough information to unwrap arguments or wrap * results for a given request. We use the rq_clntcred area for this * (which is a per-request buffer). 
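The cl_seqlast/cl_seqmask fields above implement the RPCSEC_GSS sequence window (the replay protection described in RFC 2203). A hedged, self-contained sketch of how such a window can be maintained; the names and exact logic are illustrative, not this file's implementation:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define SEQWINDOW 128			/* mirrors SVC_RPC_GSS_SEQWINDOW */

struct seqwin {
	uint32_t last;			/* highest sequence accepted */
	uint32_t mask[SEQWINDOW / 32];	/* bit j set => (last - j) seen */
};

static bool
seqwin_testbit(struct seqwin *w, uint32_t j)
{
	return ((w->mask[j / 32] & (1u << (j % 32))) != 0);
}

static void
seqwin_setbit(struct seqwin *w, uint32_t j)
{
	w->mask[j / 32] |= 1u << (j % 32);
}

/* Shift every recorded bit 'off' positions toward older entries. */
static void
seqwin_slide(struct seqwin *w, uint32_t off)
{
	int i, wordoff = off / 32, bitoff = off % 32;
	uint32_t v;

	for (i = SEQWINDOW / 32 - 1; i >= 0; i--) {
		v = 0;
		if (i - wordoff >= 0) {
			v = w->mask[i - wordoff] << bitoff;
			if (bitoff != 0 && i - wordoff - 1 >= 0)
				v |= w->mask[i - wordoff - 1] >> (32 - bitoff);
		}
		w->mask[i] = v;
	}
}

/* Reject replays and sequence numbers that have fallen out of the window. */
static bool
seqwin_accept(struct seqwin *w, uint32_t seq)
{
	uint32_t j;

	if (seq > w->last) {
		if (seq - w->last >= SEQWINDOW)
			memset(w->mask, 0, sizeof(w->mask));
		else
			seqwin_slide(w, seq - w->last);
		w->last = seq;
		seqwin_setbit(w, 0);
		return (true);
	}
	j = w->last - seq;
	if (j >= SEQWINDOW || seqwin_testbit(w, j))
		return (false);
	seqwin_setbit(w, j);
	return (true);
}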
*/ struct svc_rpc_gss_cookedcred { struct svc_rpc_gss_client *cc_client; rpc_gss_service_t cc_service; uint32_t cc_seq; }; #define CLIENT_HASH_SIZE 256 #define CLIENT_MAX 1024 u_int svc_rpc_gss_client_max = CLIENT_MAX; u_int svc_rpc_gss_client_hash_size = CLIENT_HASH_SIZE; SYSCTL_DECL(_kern_rpc); SYSCTL_NODE(_kern_rpc, OID_AUTO, gss, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GSS"); SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_max, CTLFLAG_RW, &svc_rpc_gss_client_max, 0, "Max number of rpc-gss clients"); SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_hash, CTLFLAG_RDTUN, &svc_rpc_gss_client_hash_size, 0, "Size of rpc-gss client hash table"); static u_int svc_rpc_gss_lifetime_max = 0; SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, lifetime_max, CTLFLAG_RW, &svc_rpc_gss_lifetime_max, 0, "Maximum lifetime (seconds) of rpc-gss clients"); static u_int svc_rpc_gss_client_count; SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_count, CTLFLAG_RD, &svc_rpc_gss_client_count, 0, "Number of rpc-gss clients"); KGSS_VNET_DEFINE(struct svc_rpc_gss_client_list *, svc_rpc_gss_client_hash); KGSS_VNET_DEFINE(struct svc_rpc_gss_client_list, svc_rpc_gss_clients); KGSS_VNET_DEFINE_STATIC(uint32_t, svc_rpc_gss_next_clientid) = 1; static void svc_rpc_gss_init(void *unused __unused) { svc_auth_reg(RPCSEC_GSS, svc_rpc_gss, rpc_gss_svc_getcred); sx_init(&svc_rpc_gss_lock, "gsslock"); } SYSINIT(svc_rpc_gss_init, SI_SUB_VFS, SI_ORDER_ANY, svc_rpc_gss_init, NULL); static void svc_rpc_gss_cleanup(void *unused __unused) { sx_destroy(&svc_rpc_gss_lock); } SYSUNINIT(svc_rpc_gss_cleanup, SI_SUB_VFS, SI_ORDER_ANY, svc_rpc_gss_cleanup, NULL); static void svc_rpc_gss_vnetinit(void *unused __unused) { int i; KGSS_VNET(svc_rpc_gss_client_hash) = mem_alloc( sizeof(struct svc_rpc_gss_client_list) * svc_rpc_gss_client_hash_size); for (i = 0; i < svc_rpc_gss_client_hash_size; i++) TAILQ_INIT(&KGSS_VNET(svc_rpc_gss_client_hash)[i]); TAILQ_INIT(&KGSS_VNET(svc_rpc_gss_clients)); } VNET_SYSINIT(svc_rpc_gss_vnetinit, SI_SUB_VNET_DONE, SI_ORDER_ANY, svc_rpc_gss_vnetinit, NULL); static void svc_rpc_gss_vnet_cleanup(void *unused __unused) { mem_free(KGSS_VNET(svc_rpc_gss_client_hash), sizeof(struct svc_rpc_gss_client_list) * svc_rpc_gss_client_hash_size); } VNET_SYSUNINIT(svc_rpc_gss_vnet_cleanup, SI_SUB_VNET_DONE, SI_ORDER_ANY, svc_rpc_gss_vnet_cleanup, NULL); bool_t rpc_gss_set_callback(rpc_gss_callback_t *cb) { struct svc_rpc_gss_callback *scb; scb = mem_alloc(sizeof(struct svc_rpc_gss_callback)); if (!scb) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } scb->cb_callback = *cb; sx_xlock(&svc_rpc_gss_lock); SLIST_INSERT_HEAD(&KGSS_VNET(svc_rpc_gss_callbacks), scb, cb_link); sx_xunlock(&svc_rpc_gss_lock); return (TRUE); } void rpc_gss_clear_callback(rpc_gss_callback_t *cb) { struct svc_rpc_gss_callback *scb; sx_xlock(&svc_rpc_gss_lock); SLIST_FOREACH(scb, &KGSS_VNET(svc_rpc_gss_callbacks), cb_link) { if (scb->cb_callback.program == cb->program && scb->cb_callback.version == cb->version && scb->cb_callback.callback == cb->callback) { SLIST_REMOVE(&KGSS_VNET(svc_rpc_gss_callbacks), scb, svc_rpc_gss_callback, cb_link); sx_xunlock(&svc_rpc_gss_lock); mem_free(scb, sizeof(*scb)); return; } } sx_xunlock(&svc_rpc_gss_lock); } static bool_t rpc_gss_acquire_svc_cred(struct svc_rpc_gss_svc_name *sname) { OM_uint32 maj_stat, min_stat; gss_buffer_desc namebuf; gss_name_t name; gss_OID_set_desc oid_set; oid_set.count = 1; oid_set.elements = sname->sn_mech; namebuf.value = (void *) sname->sn_principal; namebuf.length = strlen(sname->sn_principal); maj_stat = 
gss_import_name(&min_stat, &namebuf, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) return (FALSE); if (sname->sn_cred != GSS_C_NO_CREDENTIAL) gss_release_cred(&min_stat, &sname->sn_cred); maj_stat = gss_acquire_cred(&min_stat, name, sname->sn_req_time, &oid_set, GSS_C_ACCEPT, &sname->sn_cred, NULL, NULL); if (maj_stat != GSS_S_COMPLETE) { gss_release_name(&min_stat, &name); return (FALSE); } gss_release_name(&min_stat, &name); return (TRUE); } bool_t rpc_gss_set_svc_name(const char *principal, const char *mechanism, u_int req_time, u_int program, u_int version) { struct svc_rpc_gss_svc_name *sname; gss_OID mech_oid; if (!rpc_gss_mech_to_oid(mechanism, &mech_oid)) return (FALSE); sname = mem_alloc(sizeof(*sname)); if (!sname) return (FALSE); sname->sn_principal = strdup(principal, M_RPC); sname->sn_mech = mech_oid; sname->sn_req_time = req_time; sname->sn_cred = GSS_C_NO_CREDENTIAL; sname->sn_program = program; sname->sn_version = version; if (!rpc_gss_acquire_svc_cred(sname)) { free(sname->sn_principal, M_RPC); mem_free(sname, sizeof(*sname)); return (FALSE); } sx_xlock(&svc_rpc_gss_lock); SLIST_INSERT_HEAD(&KGSS_VNET(svc_rpc_gss_svc_names), sname, sn_link); sx_xunlock(&svc_rpc_gss_lock); return (TRUE); } void rpc_gss_clear_svc_name(u_int program, u_int version) { OM_uint32 min_stat; struct svc_rpc_gss_svc_name *sname; sx_xlock(&svc_rpc_gss_lock); SLIST_FOREACH(sname, &KGSS_VNET(svc_rpc_gss_svc_names), sn_link) { if (sname->sn_program == program && sname->sn_version == version) { SLIST_REMOVE(&KGSS_VNET(svc_rpc_gss_svc_names), sname, svc_rpc_gss_svc_name, sn_link); sx_xunlock(&svc_rpc_gss_lock); gss_release_cred(&min_stat, &sname->sn_cred); free(sname->sn_principal, M_RPC); mem_free(sname, sizeof(*sname)); return; } } sx_xunlock(&svc_rpc_gss_lock); } bool_t rpc_gss_get_principal_name(rpc_gss_principal_t *principal, const char *mech, const char *name, const char *node, const char *domain) { OM_uint32 maj_stat, min_stat; gss_OID mech_oid; size_t namelen; gss_buffer_desc buf; gss_name_t gss_name, gss_mech_name; rpc_gss_principal_t result; if (!rpc_gss_mech_to_oid(mech, &mech_oid)) return (FALSE); /* * Construct a gss_buffer containing the full name formatted * as "name/node@domain" where node and domain are optional. */ namelen = strlen(name) + 1; if (node) { namelen += strlen(node) + 1; } if (domain) { namelen += strlen(domain) + 1; } buf.value = mem_alloc(namelen); buf.length = namelen; strcpy((char *) buf.value, name); if (node) { strcat((char *) buf.value, "/"); strcat((char *) buf.value, node); } if (domain) { strcat((char *) buf.value, "@"); strcat((char *) buf.value, domain); } /* * Convert that to a gss_name_t and then convert that to a * mechanism name in the selected mechanism. */ maj_stat = gss_import_name(&min_stat, &buf, GSS_C_NT_USER_NAME, &gss_name); mem_free(buf.value, buf.length); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_import_name", mech_oid, maj_stat, min_stat); return (FALSE); } maj_stat = gss_canonicalize_name(&min_stat, gss_name, mech_oid, &gss_mech_name); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_canonicalize_name", mech_oid, maj_stat, min_stat); gss_release_name(&min_stat, &gss_name); return (FALSE); } gss_release_name(&min_stat, &gss_name); /* * Export the mechanism name and use that to construct the * rpc_gss_principal_t result. 
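A hedged sketch of how a service would use rpc_gss_set_svc_name(), shown earlier in this hunk, at start-up; the principal, program and version numbers are placeholder values:

static bool_t
example_register_service(void)
{
	/* "nfs@server.example.com" and 100003/4 are illustrative only. */
	return (rpc_gss_set_svc_name("nfs@server.example.com", "kerberosv5",
	    GSS_C_INDEFINITE, 100003, 4));
}

The acceptor credential acquired here is what gss_accept_sec_context() uses further down; rpc_gss_clear_svc_name() releases it again.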
*/ maj_stat = gss_export_name(&min_stat, gss_mech_name, &buf); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_export_name", mech_oid, maj_stat, min_stat); gss_release_name(&min_stat, &gss_mech_name); return (FALSE); } gss_release_name(&min_stat, &gss_mech_name); result = mem_alloc(sizeof(int) + buf.length); if (!result) { gss_release_buffer(&min_stat, &buf); return (FALSE); } result->len = buf.length; memcpy(result->name, buf.value, buf.length); gss_release_buffer(&min_stat, &buf); *principal = result; return (TRUE); } /* * Note that the ip_addr and srv_principal pointers can point to the same * buffer, so long as ip_addr is at least strlen(srv_name) + 1 > srv_principal. */ bool_t rpc_gss_ip_to_srv_principal(char *ip_addr, const char *srv_name, char *srv_principal) { OM_uint32 maj_stat, min_stat; size_t len; /* * First fill in the service name and '@'. */ len = strlen(srv_name); if (len > NI_MAXSERV) return (FALSE); memcpy(srv_principal, srv_name, len); srv_principal[len] = '@'; /* * Do reverse DNS to get the DNS name for the ip_addr. */ maj_stat = gss_ip_to_dns(&min_stat, ip_addr, &srv_principal[len + 1]); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_ip_to_dns", NULL, maj_stat, min_stat); return (FALSE); } return (TRUE); } bool_t rpc_gss_getcred(struct svc_req *req, rpc_gss_rawcred_t **rcred, rpc_gss_ucred_t **ucred, void **cookie) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; if (req->rq_cred.oa_flavor != RPCSEC_GSS) return (FALSE); cc = req->rq_clntcred; client = cc->cc_client; if (rcred) *rcred = &client->cl_rawcred; if (ucred) *ucred = &client->cl_ucred; if (cookie) *cookie = client->cl_cookie; return (TRUE); } /* * This simpler interface is used by svc_getcred to copy the cred data * into a kernel cred structure. 
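A hedged sketch of how a dispatch routine might combine rpc_gss_getcred() and rpc_gss_get_principal_name(), both shown above, to authorize one specific client principal; the mechanism, user and realm strings are placeholders:

static bool_t
example_caller_is(struct svc_req *rqst, const char *user)
{
	rpc_gss_rawcred_t *rcred;
	rpc_gss_principal_t want;
	bool_t ok = FALSE;

	if (!rpc_gss_getcred(rqst, &rcred, NULL, NULL))
		return (FALSE);
	if (!rpc_gss_get_principal_name(&want, "kerberosv5", user,
	    NULL, "EXAMPLE.COM"))
		return (FALSE);
	if (want->len == rcred->client_principal->len &&
	    memcmp(want->name, rcred->client_principal->name, want->len) == 0)
		ok = TRUE;
	mem_free(want, sizeof(int) + want->len);
	return (ok);
}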
*/ static int rpc_gss_svc_getcred(struct svc_req *req, struct ucred **crp, int *flavorp) { struct ucred *cr; struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_ucred_t *uc; if (req->rq_cred.oa_flavor != RPCSEC_GSS) return (FALSE); cc = req->rq_clntcred; client = cc->cc_client; if (flavorp) *flavorp = client->cl_rpcflavor; if (client->cl_cred) { *crp = crhold(client->cl_cred); return (TRUE); } uc = &client->cl_ucred; cr = client->cl_cred = crget(); cr->cr_uid = cr->cr_ruid = cr->cr_svuid = uc->uid; cr->cr_rgid = cr->cr_svgid = uc->gid; crsetgroups_and_egid(cr, uc->gidlen, uc->gidlist, uc->gid); cr->cr_prison = curthread->td_ucred->cr_prison; prison_hold(cr->cr_prison); *crp = crhold(cr); return (TRUE); } int rpc_gss_svc_max_data_length(struct svc_req *req, int max_tp_unit_len) { struct svc_rpc_gss_cookedcred *cc = req->rq_clntcred; struct svc_rpc_gss_client *client = cc->cc_client; int want_conf; OM_uint32 max; OM_uint32 maj_stat, min_stat; int result; switch (client->cl_rawcred.service) { case rpc_gss_svc_none: return (max_tp_unit_len); break; case rpc_gss_svc_default: case rpc_gss_svc_integrity: want_conf = FALSE; break; case rpc_gss_svc_privacy: want_conf = TRUE; break; default: return (0); } maj_stat = gss_wrap_size_limit(&min_stat, client->cl_ctx, want_conf, client->cl_qop, max_tp_unit_len, &max); if (maj_stat == GSS_S_COMPLETE) { result = (int) max; if (result < 0) result = 0; return (result); } else { rpc_gss_log_status("gss_wrap_size_limit", client->cl_mech, maj_stat, min_stat); return (0); } } static struct svc_rpc_gss_client * svc_rpc_gss_find_client(struct svc_rpc_gss_clientid *id) { struct svc_rpc_gss_client *client; struct svc_rpc_gss_client_list *list; struct timeval boottime; unsigned long hostid; rpc_gss_log_debug("in svc_rpc_gss_find_client(%d)", id->ci_id); getcredhostid(curthread->td_ucred, &hostid); getboottime(&boottime); if (id->ci_hostid != hostid || id->ci_boottime != boottime.tv_sec) return (NULL); list = &KGSS_VNET(svc_rpc_gss_client_hash) [id->ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_FOREACH(client, list, cl_link) { if (client->cl_id.ci_id == id->ci_id) { /* * Move this client to the front of the LRU * list. */ TAILQ_REMOVE(&KGSS_VNET(svc_rpc_gss_clients), client, cl_alllink); TAILQ_INSERT_HEAD(&KGSS_VNET(svc_rpc_gss_clients), client, cl_alllink); refcount_acquire(&client->cl_refs); break; } } sx_xunlock(&svc_rpc_gss_lock); return (client); } static struct svc_rpc_gss_client * svc_rpc_gss_create_client(void) { struct svc_rpc_gss_client *client; struct svc_rpc_gss_client_list *list; struct timeval boottime; unsigned long hostid; rpc_gss_log_debug("in svc_rpc_gss_create_client()"); client = mem_alloc(sizeof(struct svc_rpc_gss_client)); memset(client, 0, sizeof(struct svc_rpc_gss_client)); /* * Set the initial value of cl_refs to two. One for the caller * and the other to hold onto the client structure until it expires. */ refcount_init(&client->cl_refs, 2); sx_init(&client->cl_lock, "GSS-client"); getcredhostid(curthread->td_ucred, &hostid); client->cl_id.ci_hostid = hostid; getboottime(&boottime); client->cl_id.ci_boottime = boottime.tv_sec; client->cl_id.ci_id = KGSS_VNET(svc_rpc_gss_next_clientid)++; /* * Start the client off with a short expiration time. We will * try to get a saner value from the client creds later. 
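A hedged illustration of the handle round trip: the opaque handle returned to the client is just the { ci_hostid, ci_boottime, ci_id } triple above, so a handle minted on another host or before a reboot is rejected by svc_rpc_gss_find_client() without consulting the hash table. Illustrative code, not this file's:

static struct svc_rpc_gss_client *
example_lookup(gss_buffer_t handle)
{
	struct svc_rpc_gss_clientid id;

	if (handle->length != sizeof(id))
		return (NULL);
	memcpy(&id, handle->value, sizeof(id));
	return (svc_rpc_gss_find_client(&id));
}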
*/ client->cl_state = CLIENT_NEW; client->cl_locked = FALSE; client->cl_expiration = time_uptime + 5*60; list = &KGSS_VNET(svc_rpc_gss_client_hash) [client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_INSERT_HEAD(list, client, cl_link); TAILQ_INSERT_HEAD(&KGSS_VNET(svc_rpc_gss_clients), client, cl_alllink); svc_rpc_gss_client_count++; sx_xunlock(&svc_rpc_gss_lock); return (client); } static void svc_rpc_gss_destroy_client(struct svc_rpc_gss_client *client) { OM_uint32 min_stat; rpc_gss_log_debug("in svc_rpc_gss_destroy_client()"); if (client->cl_ctx) gss_delete_sec_context(&min_stat, &client->cl_ctx, GSS_C_NO_BUFFER); if (client->cl_cname) gss_release_name(&min_stat, &client->cl_cname); if (client->cl_rawcred.client_principal) mem_free(client->cl_rawcred.client_principal, sizeof(*client->cl_rawcred.client_principal) + client->cl_rawcred.client_principal->len); if (client->cl_cred) crfree(client->cl_cred); sx_destroy(&client->cl_lock); mem_free(client, sizeof(*client)); } /* * Drop a reference to a client and free it if that was the last reference. */ static void svc_rpc_gss_release_client(struct svc_rpc_gss_client *client) { if (!refcount_release(&client->cl_refs)) return; svc_rpc_gss_destroy_client(client); } /* * Remove a client from our global lists. * Must be called with svc_rpc_gss_lock held. */ static void svc_rpc_gss_forget_client_locked(struct svc_rpc_gss_client *client) { struct svc_rpc_gss_client_list *list; sx_assert(&svc_rpc_gss_lock, SX_XLOCKED); list = &KGSS_VNET(svc_rpc_gss_client_hash) [client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; TAILQ_REMOVE(list, client, cl_link); TAILQ_REMOVE(&KGSS_VNET(svc_rpc_gss_clients), client, cl_alllink); svc_rpc_gss_client_count--; } /* * Remove a client from our global lists and free it if we can. */ static void svc_rpc_gss_forget_client(struct svc_rpc_gss_client *client) { struct svc_rpc_gss_client_list *list; struct svc_rpc_gss_client *tclient; list = &KGSS_VNET(svc_rpc_gss_client_hash) [client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_FOREACH(tclient, list, cl_link) { /* * Make sure this client has not already been removed * from the lists by svc_rpc_gss_forget_client() or * svc_rpc_gss_forget_client_locked(). */ if (client == tclient) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); svc_rpc_gss_release_client(client); return; } } sx_xunlock(&svc_rpc_gss_lock); } static void svc_rpc_gss_timeout_clients(void) { struct svc_rpc_gss_client *client; time_t now = time_uptime; rpc_gss_log_debug("in svc_rpc_gss_timeout_clients()"); /* * First enforce the max client limit. We keep * svc_rpc_gss_clients in LRU order. */ sx_xlock(&svc_rpc_gss_lock); client = TAILQ_LAST(&KGSS_VNET(svc_rpc_gss_clients), svc_rpc_gss_client_list); while (svc_rpc_gss_client_count > svc_rpc_gss_client_max && client != NULL) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); svc_rpc_gss_release_client(client); sx_xlock(&svc_rpc_gss_lock); client = TAILQ_LAST(&KGSS_VNET(svc_rpc_gss_clients), svc_rpc_gss_client_list); } again: TAILQ_FOREACH(client, &KGSS_VNET(svc_rpc_gss_clients), cl_alllink) { if (client->cl_state == CLIENT_STALE || now > client->cl_expiration) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); rpc_gss_log_debug("expiring client %p", client); svc_rpc_gss_release_client(client); sx_xlock(&svc_rpc_gss_lock); goto again; } } sx_xunlock(&svc_rpc_gss_lock); } #ifdef DEBUG /* * OID<->string routines. 
These are uuuuugly. */ static OM_uint32 gss_oid_to_str(OM_uint32 *minor_status, gss_OID oid, gss_buffer_t oid_str) { char numstr[128]; unsigned long number; int numshift; size_t string_length; size_t i; unsigned char *cp; char *bp; /* Decoded according to krb5/gssapi_krb5.c */ /* First determine the size of the string */ string_length = 0; number = 0; numshift = 0; cp = (unsigned char *) oid->elements; number = (unsigned long) cp[0]; sprintf(numstr, "%ld ", number/40); string_length += strlen(numstr); sprintf(numstr, "%ld ", number%40); string_length += strlen(numstr); for (i=1; i < oid->length; i++) { if ( (size_t) (numshift+7) < (sizeof(unsigned long)*8)) { number = (number << 7) | (cp[i] & 0x7f); numshift += 7; } else { *minor_status = 0; return(GSS_S_FAILURE); } if ((cp[i] & 0x80) == 0) { sprintf(numstr, "%ld ", number); string_length += strlen(numstr); number = 0; numshift = 0; } } /* * If we get here, we've calculated the length of "n n n ... n ". Add 4 * here for "{ " and "}\0". */ string_length += 4; if ((bp = malloc(string_length, M_GSSAPI, M_WAITOK | M_ZERO))) { strcpy(bp, "{ "); number = (unsigned long) cp[0]; sprintf(numstr, "%ld ", number/40); strcat(bp, numstr); sprintf(numstr, "%ld ", number%40); strcat(bp, numstr); number = 0; cp = (unsigned char *) oid->elements; for (i=1; i < oid->length; i++) { number = (number << 7) | (cp[i] & 0x7f); if ((cp[i] & 0x80) == 0) { sprintf(numstr, "%ld ", number); strcat(bp, numstr); number = 0; } } strcat(bp, "}"); oid_str->length = strlen(bp)+1; oid_str->value = (void *) bp; *minor_status = 0; return(GSS_S_COMPLETE); } *minor_status = 0; return(GSS_S_FAILURE); } #endif static void svc_rpc_gss_build_ucred(struct svc_rpc_gss_client *client, const gss_name_t name) { OM_uint32 maj_stat, min_stat; rpc_gss_ucred_t *uc = &client->cl_ucred; int numgroups; uc->uid = 65534; uc->gid = 65534; uc->gidlist = client->cl_gid_storage; numgroups = NGROUPS; maj_stat = gss_pname_to_unix_cred(&min_stat, name, client->cl_mech, &uc->uid, &uc->gid, &numgroups, &uc->gidlist[0]); if (GSS_ERROR(maj_stat)) uc->gidlen = 0; else uc->gidlen = numgroups; } static void svc_rpc_gss_set_flavor(struct svc_rpc_gss_client *client) { static gss_OID_desc krb5_mech_oid = {9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" }; /* * Attempt to translate mech type and service into a * 'pseudo flavor'. Hardwire in krb5 support for now.
*/ if (kgss_oid_equal(client->cl_mech, &krb5_mech_oid)) { switch (client->cl_rawcred.service) { case rpc_gss_svc_default: case rpc_gss_svc_none: client->cl_rpcflavor = RPCSEC_GSS_KRB5; break; case rpc_gss_svc_integrity: client->cl_rpcflavor = RPCSEC_GSS_KRB5I; break; case rpc_gss_svc_privacy: client->cl_rpcflavor = RPCSEC_GSS_KRB5P; break; } } else { client->cl_rpcflavor = RPCSEC_GSS; } } static bool_t svc_rpc_gss_accept_sec_context(struct svc_rpc_gss_client *client, struct svc_req *rqst, struct rpc_gss_init_res *gr, struct rpc_gss_cred *gc) { gss_buffer_desc recv_tok; gss_OID mech; OM_uint32 maj_stat = 0, min_stat = 0, ret_flags; OM_uint32 cred_lifetime; struct svc_rpc_gss_svc_name *sname; gss_buffer_desc export_name; rpc_gss_ucred_t *uc = &client->cl_ucred; int numgroups; static enum krb_imp my_krb_imp = KRBIMP_UNKNOWN; rpc_gss_log_debug("in svc_rpc_gss_accept_context()"); if (my_krb_imp == KRBIMP_UNKNOWN) { maj_stat = gss_supports_lucid(&min_stat, NULL); if (maj_stat == GSS_S_COMPLETE) my_krb_imp = KRBIMP_MIT; else - my_krb_imp = KRBIMP_HESIOD1; + my_krb_imp = KRBIMP_HEIMDALV1; min_stat = 0; } if (my_krb_imp == KRBIMP_MIT) { uc->uid = 65534; uc->gid = 65534; uc->gidlist = client->cl_gid_storage; numgroups = NGROUPS; } /* Deserialize arguments. */ memset(&recv_tok, 0, sizeof(recv_tok)); if (!svc_getargs(rqst, (xdrproc_t) xdr_gss_buffer_desc, (caddr_t) &recv_tok)) { client->cl_state = CLIENT_STALE; return (FALSE); } /* * First time round, try all the server names we have until * one matches. Afterwards, stick with that one. */ sx_xlock(&svc_rpc_gss_lock); if (!client->cl_sname) { SLIST_FOREACH(sname, &KGSS_VNET(svc_rpc_gss_svc_names), sn_link) { if (sname->sn_program == rqst->rq_prog && sname->sn_version == rqst->rq_vers) { retry: if (my_krb_imp == KRBIMP_MIT) gr->gr_major = gss_accept_sec_context_lucid_v1( &gr->gr_minor, &client->cl_ctx, sname->sn_cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &client->cl_cname, &mech, &gr->gr_token, &ret_flags, &cred_lifetime, &client->cl_creds, &export_name, &uc->uid, &uc->gid, &numgroups, &uc->gidlist[0]); else gr->gr_major = gss_accept_sec_context( &gr->gr_minor, &client->cl_ctx, sname->sn_cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &client->cl_cname, &mech, &gr->gr_token, &ret_flags, &cred_lifetime, &client->cl_creds); if (gr->gr_major == GSS_S_CREDENTIALS_EXPIRED) { /* * Either our creds really did * expire or gssd was * restarted. */ if (rpc_gss_acquire_svc_cred(sname)) goto retry; } client->cl_sname = sname; break; } } if (!sname) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_tok); sx_xunlock(&svc_rpc_gss_lock); return (FALSE); } } else { if (my_krb_imp == KRBIMP_MIT) gr->gr_major = gss_accept_sec_context_lucid_v1( &gr->gr_minor, &client->cl_ctx, client->cl_sname->sn_cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &client->cl_cname, &mech, &gr->gr_token, &ret_flags, &cred_lifetime, NULL, &export_name, &uc->uid, &uc->gid, &numgroups, &uc->gidlist[0]); else gr->gr_major = gss_accept_sec_context( &gr->gr_minor, &client->cl_ctx, client->cl_sname->sn_cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &client->cl_cname, &mech, &gr->gr_token, &ret_flags, &cred_lifetime, NULL); } sx_xunlock(&svc_rpc_gss_lock); xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_tok); /* * If we get an error from gss_accept_sec_context, send the * reply anyway so that the client gets a chance to see what * is wrong. 
	 */
	if (gr->gr_major != GSS_S_COMPLETE &&
	    gr->gr_major != GSS_S_CONTINUE_NEEDED) {
		rpc_gss_log_status("accept_sec_context", client->cl_mech,
		    gr->gr_major, gr->gr_minor);
		client->cl_state = CLIENT_STALE;
		if (my_krb_imp == KRBIMP_MIT)
			uc->gidlen = 0;
		return (TRUE);
	}
	if (my_krb_imp == KRBIMP_MIT)
		uc->gidlen = numgroups;

	gr->gr_handle.value = &client->cl_id;
	gr->gr_handle.length = sizeof(client->cl_id);
	gr->gr_win = SVC_RPC_GSS_SEQWINDOW;

	/* Save client info. */
	client->cl_mech = mech;
	client->cl_qop = GSS_C_QOP_DEFAULT;
	client->cl_done_callback = FALSE;

	if (gr->gr_major == GSS_S_COMPLETE) {
		/*
		 * Change client expiration time to be near when the
		 * client creds expire (or 24 hours if we can't figure
		 * that out).
		 */
		if (cred_lifetime == GSS_C_INDEFINITE)
			cred_lifetime = 24*60*60;

		/*
		 * Cap cred_lifetime if sysctl kern.rpc.gss.lifetime_max
		 * is set.
		 */
		if (svc_rpc_gss_lifetime_max > 0 &&
		    cred_lifetime > svc_rpc_gss_lifetime_max)
			cred_lifetime = svc_rpc_gss_lifetime_max;

		client->cl_expiration = time_uptime + cred_lifetime;

		/*
		 * Fill in cred details in the rawcred structure.
		 */
		client->cl_rawcred.version = RPCSEC_GSS_VERSION;
		rpc_gss_oid_to_mech(mech, &client->cl_rawcred.mechanism);
		maj_stat = GSS_S_COMPLETE;
		if (my_krb_imp != KRBIMP_MIT)
			maj_stat = gss_export_name(&min_stat,
			    client->cl_cname, &export_name);
		if (maj_stat != GSS_S_COMPLETE) {
			rpc_gss_log_status("gss_export_name",
			    client->cl_mech, maj_stat, min_stat);
			return (FALSE);
		}
		client->cl_rawcred.client_principal =
		    mem_alloc(sizeof(*client->cl_rawcred.client_principal)
			+ export_name.length);
		client->cl_rawcred.client_principal->len =
		    export_name.length;
		memcpy(client->cl_rawcred.client_principal->name,
		    export_name.value, export_name.length);
		gss_release_buffer(&min_stat, &export_name);
		client->cl_rawcred.svc_principal =
		    client->cl_sname->sn_principal;
		client->cl_rawcred.service = gc->gc_svc;

		/*
		 * Use gss_pname_to_uid to map to unix creds. For
		 * kerberos5, this uses krb5_aname_to_localname.
		 */
		if (my_krb_imp != KRBIMP_MIT)
			svc_rpc_gss_build_ucred(client, client->cl_cname);
		svc_rpc_gss_set_flavor(client);
		gss_release_name(&min_stat, &client->cl_cname);

#ifdef DEBUG
		{
			gss_buffer_desc mechname;

			gss_oid_to_str(&min_stat, mech, &mechname);

			rpc_gss_log_debug("accepted context for %s with "
			    "<mech %.*s, qop %d, svc %d>",
			    client->cl_rawcred.client_principal->name,
			    mechname.length, (char *)mechname.value,
			    client->cl_qop, client->cl_rawcred.service);

			gss_release_buffer(&min_stat, &mechname);
		}
#endif /* DEBUG */
	}
	return (TRUE);
}

static bool_t
svc_rpc_gss_validate(struct svc_rpc_gss_client *client, struct rpc_msg *msg,
    gss_qop_t *qop, rpc_gss_proc_t gcproc)
{
	struct opaque_auth	*oa;
	gss_buffer_desc		rpcbuf, checksum;
	OM_uint32		maj_stat, min_stat;
	gss_qop_t		qop_state;
	int32_t			rpchdr[128 / sizeof(int32_t)];
	int32_t			*buf;

	rpc_gss_log_debug("in svc_rpc_gss_validate()");

	memset(rpchdr, 0, sizeof(rpchdr));

	/* Reconstruct RPC header for signing (from xdr_callmsg).
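	 *
	 * The checksum in the call's verifier was computed by the client
	 * over the serialized call header up to and including the
	 * credential, so the same fields are re-serialized here in XDR
	 * (big-endian) order before handing them to gss_verify_mic().
	 * A rough sketch of the equivalent layout using plain htonl()
	 * instead of the IXDR_PUT_* macros (xid, prog, vers and proc are
	 * hypothetical values, not taken from this file):
	 *
	 *	uint32_t hdr[6];
	 *
	 *	hdr[0] = htonl(xid);
	 *	hdr[1] = htonl(0);
	 *	hdr[2] = htonl(2);
	 *	hdr[3] = htonl(prog);
	 *	hdr[4] = htonl(vers);
	 *	hdr[5] = htonl(proc);
	 *
	 * where the 0 is rm_direction (CALL) and the 2 is cb_rpcvers;
	 * the credential flavor, length and body follow, exactly as
	 * xdr_callmsg() would have put them on the wire.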
*/ buf = rpchdr; IXDR_PUT_LONG(buf, msg->rm_xid); IXDR_PUT_ENUM(buf, msg->rm_direction); IXDR_PUT_LONG(buf, msg->rm_call.cb_rpcvers); IXDR_PUT_LONG(buf, msg->rm_call.cb_prog); IXDR_PUT_LONG(buf, msg->rm_call.cb_vers); IXDR_PUT_LONG(buf, msg->rm_call.cb_proc); oa = &msg->rm_call.cb_cred; IXDR_PUT_ENUM(buf, oa->oa_flavor); IXDR_PUT_LONG(buf, oa->oa_length); if (oa->oa_length) { memcpy((caddr_t)buf, oa->oa_base, oa->oa_length); buf += RNDUP(oa->oa_length) / sizeof(int32_t); } rpcbuf.value = rpchdr; rpcbuf.length = (u_char *)buf - (u_char *)rpchdr; checksum.value = msg->rm_call.cb_verf.oa_base; checksum.length = msg->rm_call.cb_verf.oa_length; maj_stat = gss_verify_mic(&min_stat, client->cl_ctx, &rpcbuf, &checksum, &qop_state); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_verify_mic", client->cl_mech, maj_stat, min_stat); /* * A bug in some versions of the Linux client generates a * Destroy operation with a bogus encrypted checksum. Deleting * the credential handle for that case causes the mount to fail. * Since the checksum is bogus (gss_verify_mic() failed), it * doesn't make sense to destroy the handle and not doing so * fixes the Linux mount. */ if (gcproc != RPCSEC_GSS_DESTROY) client->cl_state = CLIENT_STALE; return (FALSE); } *qop = qop_state; return (TRUE); } static bool_t svc_rpc_gss_nextverf(struct svc_rpc_gss_client *client, struct svc_req *rqst, u_int seq) { gss_buffer_desc signbuf; gss_buffer_desc mic; OM_uint32 maj_stat, min_stat; uint32_t nseq; rpc_gss_log_debug("in svc_rpc_gss_nextverf()"); nseq = htonl(seq); signbuf.value = &nseq; signbuf.length = sizeof(nseq); maj_stat = gss_get_mic(&min_stat, client->cl_ctx, client->cl_qop, &signbuf, &mic); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_get_mic", client->cl_mech, maj_stat, min_stat); client->cl_state = CLIENT_STALE; return (FALSE); } KASSERT(mic.length <= MAX_AUTH_BYTES, ("MIC too large for RPCSEC_GSS")); rqst->rq_verf.oa_flavor = RPCSEC_GSS; rqst->rq_verf.oa_length = mic.length; bcopy(mic.value, rqst->rq_verf.oa_base, mic.length); gss_release_buffer(&min_stat, &mic); return (TRUE); } static bool_t svc_rpc_gss_callback(struct svc_rpc_gss_client *client, struct svc_req *rqst) { struct svc_rpc_gss_callback *scb; rpc_gss_lock_t lock; void *cookie; bool_t cb_res; bool_t result; /* * See if we have a callback for this guy. */ result = TRUE; SLIST_FOREACH(scb, &KGSS_VNET(svc_rpc_gss_callbacks), cb_link) { if (scb->cb_callback.program == rqst->rq_prog && scb->cb_callback.version == rqst->rq_vers) { /* * This one matches. Call the callback and see * if it wants to veto or something. */ lock.locked = FALSE; lock.raw_cred = &client->cl_rawcred; cb_res = scb->cb_callback.callback(rqst, client->cl_creds, client->cl_ctx, &lock, &cookie); if (!cb_res) { client->cl_state = CLIENT_STALE; result = FALSE; break; } /* * The callback accepted the connection - it * is responsible for freeing client->cl_creds * now. */ client->cl_creds = GSS_C_NO_CREDENTIAL; client->cl_locked = lock.locked; client->cl_cookie = cookie; return (TRUE); } } /* * Either no callback exists for this program/version or one * of the callbacks rejected the connection. We just need to * clean up the delegated client creds, if any. 
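	 *
	 * (A callback that accepted the context took ownership of the
	 * delegated credentials and cl_creds was reset to
	 * GSS_C_NO_CREDENTIAL above, so the release below only runs when
	 * nobody claimed them.)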
*/ if (client->cl_creds) { OM_uint32 min_ver; gss_release_cred(&min_ver, &client->cl_creds); } return (result); } static bool_t svc_rpc_gss_check_replay(struct svc_rpc_gss_client *client, uint32_t seq) { uint32_t offset; int word, bit; bool_t result; sx_xlock(&client->cl_lock); if (seq <= client->cl_seqlast) { /* * The request sequence number is less than * the largest we have seen so far. If it is * outside the window or if we have seen a * request with this sequence before, silently * discard it. */ offset = client->cl_seqlast - seq; if (offset >= SVC_RPC_GSS_SEQWINDOW) { result = FALSE; goto out; } word = offset / 32; bit = offset % 32; if (client->cl_seqmask[word] & (1 << bit)) { result = FALSE; goto out; } } result = TRUE; out: sx_xunlock(&client->cl_lock); return (result); } static void svc_rpc_gss_update_seq(struct svc_rpc_gss_client *client, uint32_t seq) { int offset, i, word, bit; uint32_t carry, newcarry; sx_xlock(&client->cl_lock); if (seq > client->cl_seqlast) { /* * This request has a sequence number greater * than any we have seen so far. Advance the * seq window and set bit zero of the window * (which corresponds to the new sequence * number) */ offset = seq - client->cl_seqlast; while (offset > 32) { for (i = (SVC_RPC_GSS_SEQWINDOW / 32) - 1; i > 0; i--) { client->cl_seqmask[i] = client->cl_seqmask[i-1]; } client->cl_seqmask[0] = 0; offset -= 32; } carry = 0; for (i = 0; i < SVC_RPC_GSS_SEQWINDOW / 32; i++) { newcarry = client->cl_seqmask[i] >> (32 - offset); client->cl_seqmask[i] = (client->cl_seqmask[i] << offset) | carry; carry = newcarry; } client->cl_seqmask[0] |= 1; client->cl_seqlast = seq; } else { offset = client->cl_seqlast - seq; word = offset / 32; bit = offset % 32; client->cl_seqmask[word] |= (1 << bit); } sx_xunlock(&client->cl_lock); } enum auth_stat svc_rpc_gss(struct svc_req *rqst, struct rpc_msg *msg) { OM_uint32 min_stat; XDR xdrs; struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; struct rpc_gss_cred gc; struct rpc_gss_init_res gr; gss_qop_t qop; int call_stat; enum auth_stat result; KGSS_CURVNET_SET_QUIET(KGSS_TD_TO_VNET(curthread)); rpc_gss_log_debug("in svc_rpc_gss()"); /* Garbage collect old clients. */ svc_rpc_gss_timeout_clients(); /* Initialize reply. */ rqst->rq_verf = _null_auth; /* Deserialize client credentials. */ if (rqst->rq_cred.oa_length <= 0) { KGSS_CURVNET_RESTORE(); return (AUTH_BADCRED); } memset(&gc, 0, sizeof(gc)); xdrmem_create(&xdrs, rqst->rq_cred.oa_base, rqst->rq_cred.oa_length, XDR_DECODE); if (!xdr_rpc_gss_cred(&xdrs, &gc)) { XDR_DESTROY(&xdrs); KGSS_CURVNET_RESTORE(); return (AUTH_BADCRED); } XDR_DESTROY(&xdrs); client = NULL; /* Check version. */ if (gc.gc_version != RPCSEC_GSS_VERSION) { result = AUTH_BADCRED; goto out; } /* Check the proc and find the client (or create it) */ if (gc.gc_proc == RPCSEC_GSS_INIT) { if (gc.gc_handle.length != 0) { result = AUTH_BADCRED; goto out; } client = svc_rpc_gss_create_client(); } else { struct svc_rpc_gss_clientid *p; if (gc.gc_handle.length != sizeof(*p)) { result = AUTH_BADCRED; goto out; } p = gc.gc_handle.value; client = svc_rpc_gss_find_client(p); if (!client) { /* * Can't find the client - we may have * destroyed it - tell the other side to * re-authenticate. */ result = RPCSEC_GSS_CREDPROBLEM; goto out; } } cc = rqst->rq_clntcred; cc->cc_client = client; cc->cc_service = gc.gc_svc; cc->cc_seq = gc.gc_seq; /* * The service and sequence number must be ignored for * RPCSEC_GSS_INIT and RPCSEC_GSS_CONTINUE_INIT. 
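	 *
	 * For RPCSEC_GSS_DATA and RPCSEC_GSS_DESTROY the sequence number
	 * drives the sliding-window replay protection implemented by
	 * svc_rpc_gss_check_replay() and svc_rpc_gss_update_seq() above:
	 * an SVC_RPC_GSS_SEQWINDOW-bit mask, anchored at the highest
	 * sequence number seen so far, records which recent requests
	 * have already been processed.  A minimal userland sketch of the
	 * same idea with a single 64-bit window (replay_ok is a
	 * hypothetical helper, much smaller than the real window; needs
	 * <stdint.h>):
	 *
	 *	static uint64_t seen;
	 *	static uint32_t highest;
	 *
	 *	static int
	 *	replay_ok(uint32_t seq)
	 *	{
	 *		uint32_t off;
	 *
	 *		if (seq > highest) {
	 *			off = seq - highest;
	 *			seen = (off >= 64) ? 0 : seen << off;
	 *			seen |= 1;
	 *			highest = seq;
	 *			return (1);
	 *		}
	 *		off = highest - seq;
	 *		if (off >= 64 || (seen & ((uint64_t)1 << off)))
	 *			return (0);
	 *		seen |= (uint64_t)1 << off;
	 *		return (1);
	 *	}
	 *
	 * Bit 0 tracks the highest sequence number; anything older than
	 * the window or already marked is treated as a replay, just as
	 * svc_rpc_gss_check_replay() silently discards such requests.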
*/ if (gc.gc_proc != RPCSEC_GSS_INIT && gc.gc_proc != RPCSEC_GSS_CONTINUE_INIT) { /* * Check for sequence number overflow. */ if (gc.gc_seq >= MAXSEQ) { result = RPCSEC_GSS_CTXPROBLEM; goto out; } /* * Check for valid service. */ if (gc.gc_svc != rpc_gss_svc_none && gc.gc_svc != rpc_gss_svc_integrity && gc.gc_svc != rpc_gss_svc_privacy) { result = AUTH_BADCRED; goto out; } } /* Handle RPCSEC_GSS control procedure. */ switch (gc.gc_proc) { case RPCSEC_GSS_INIT: case RPCSEC_GSS_CONTINUE_INIT: if (rqst->rq_proc != NULLPROC) { result = AUTH_REJECTEDCRED; break; } memset(&gr, 0, sizeof(gr)); if (!svc_rpc_gss_accept_sec_context(client, rqst, &gr, &gc)) { result = AUTH_REJECTEDCRED; break; } if (gr.gr_major == GSS_S_COMPLETE) { /* * We borrow the space for the call verf to * pack our reply verf. */ rqst->rq_verf = msg->rm_call.cb_verf; if (!svc_rpc_gss_nextverf(client, rqst, gr.gr_win)) { result = AUTH_REJECTEDCRED; break; } } else { rqst->rq_verf = _null_auth; } call_stat = svc_sendreply(rqst, (xdrproc_t) xdr_rpc_gss_init_res, (caddr_t) &gr); gss_release_buffer(&min_stat, &gr.gr_token); if (!call_stat) { result = AUTH_FAILED; break; } if (gr.gr_major == GSS_S_COMPLETE) client->cl_state = CLIENT_ESTABLISHED; result = RPCSEC_GSS_NODISPATCH; break; case RPCSEC_GSS_DATA: case RPCSEC_GSS_DESTROY: if (!svc_rpc_gss_check_replay(client, gc.gc_seq)) { result = RPCSEC_GSS_NODISPATCH; break; } if (!svc_rpc_gss_validate(client, msg, &qop, gc.gc_proc)) { result = RPCSEC_GSS_CREDPROBLEM; break; } /* * We borrow the space for the call verf to pack our * reply verf. */ rqst->rq_verf = msg->rm_call.cb_verf; if (!svc_rpc_gss_nextverf(client, rqst, gc.gc_seq)) { result = RPCSEC_GSS_CTXPROBLEM; break; } svc_rpc_gss_update_seq(client, gc.gc_seq); /* * Change the SVCAUTH ops on the request to point at * our own code so that we can unwrap the arguments * and wrap the result. The caller will re-set this on * every request to point to a set of null wrap/unwrap * methods. Acquire an extra reference to the client * which will be released by svc_rpc_gss_release() * after the request has finished processing. */ refcount_acquire(&client->cl_refs); rqst->rq_auth.svc_ah_ops = &svc_auth_gss_ops; rqst->rq_auth.svc_ah_private = cc; if (gc.gc_proc == RPCSEC_GSS_DATA) { /* * We might be ready to do a callback to the server to * see if it wants to accept/reject the connection. */ sx_xlock(&client->cl_lock); if (!client->cl_done_callback) { client->cl_done_callback = TRUE; client->cl_qop = qop; client->cl_rawcred.qop = _rpc_gss_num_to_qop( client->cl_rawcred.mechanism, qop); if (!svc_rpc_gss_callback(client, rqst)) { result = AUTH_REJECTEDCRED; sx_xunlock(&client->cl_lock); break; } } sx_xunlock(&client->cl_lock); /* * If the server has locked this client to a * particular service+qop pair, enforce that * restriction now. */ if (client->cl_locked) { if (client->cl_rawcred.service != gc.gc_svc) { result = AUTH_FAILED; break; } else if (client->cl_qop != qop) { result = AUTH_BADVERF; break; } } /* * If the qop changed, look up the new qop * name for rawcred. */ if (client->cl_qop != qop) { client->cl_qop = qop; client->cl_rawcred.qop = _rpc_gss_num_to_qop( client->cl_rawcred.mechanism, qop); } /* * Make sure we use the right service value * for unwrap/wrap. 
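			 * Unless a callback locked this context to a
			 * particular service+qop pair above, a client may
			 * switch between none, integrity and privacy from
			 * one call to the next on the same context, so the
			 * cached pseudo flavor is recomputed whenever
			 * gc_svc changes.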
*/ if (client->cl_rawcred.service != gc.gc_svc) { client->cl_rawcred.service = gc.gc_svc; svc_rpc_gss_set_flavor(client); } result = AUTH_OK; } else { if (rqst->rq_proc != NULLPROC) { result = AUTH_REJECTEDCRED; break; } call_stat = svc_sendreply(rqst, (xdrproc_t) xdr_void, (caddr_t) NULL); if (!call_stat) { result = AUTH_FAILED; break; } svc_rpc_gss_forget_client(client); result = RPCSEC_GSS_NODISPATCH; break; } break; default: result = AUTH_BADCRED; break; } out: if (client) svc_rpc_gss_release_client(client); xdr_free((xdrproc_t) xdr_rpc_gss_cred, (char *) &gc); KGSS_CURVNET_RESTORE(); return (result); } static bool_t svc_rpc_gss_wrap(SVCAUTH *auth, struct mbuf **mp) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_wrap()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; if (client->cl_state != CLIENT_ESTABLISHED || cc->cc_service == rpc_gss_svc_none || *mp == NULL) { return (TRUE); } return (xdr_rpc_gss_wrap_data(mp, client->cl_ctx, client->cl_qop, cc->cc_service, cc->cc_seq)); } static bool_t svc_rpc_gss_unwrap(SVCAUTH *auth, struct mbuf **mp) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_unwrap()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; if (client->cl_state != CLIENT_ESTABLISHED || cc->cc_service == rpc_gss_svc_none) { return (TRUE); } return (xdr_rpc_gss_unwrap_data(mp, client->cl_ctx, client->cl_qop, cc->cc_service, cc->cc_seq)); } static void svc_rpc_gss_release(SVCAUTH *auth) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_release()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; svc_rpc_gss_release_client(client); }