diff --git a/sys/kgssapi/gss_accept_sec_context.c b/sys/kgssapi/gss_accept_sec_context.c index 723ed9db9072..8a49b85be852 100644 --- a/sys/kgssapi/gss_accept_sec_context.c +++ b/sys/kgssapi/gss_accept_sec_context.c @@ -1,144 +1,287 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include "gssd.h" #include "kgss_if.h" +/* + * This function should only be called when the gssd + * daemon running on the system is an old one that + * does not use gss_krb5_export_lucid_sec_context(). 
+ */ OM_uint32 gss_accept_sec_context(OM_uint32 *minor_status, gss_ctx_id_t *context_handle, const gss_cred_id_t acceptor_cred_handle, const gss_buffer_t input_token, const gss_channel_bindings_t input_chan_bindings, gss_name_t *src_name, gss_OID *mech_type, gss_buffer_t output_token, OM_uint32 *ret_flags, OM_uint32 *time_rec, gss_cred_id_t *delegated_cred_handle) { struct accept_sec_context_res res; struct accept_sec_context_args args; enum clnt_stat stat; gss_ctx_id_t ctx = *context_handle; gss_name_t name; gss_cred_id_t cred; CLIENT *cl; cl = kgss_gssd_client(); if (cl == NULL) { *minor_status = 0; return (GSS_S_FAILURE); } if (ctx) args.ctx = ctx->handle; else args.ctx = 0; if (acceptor_cred_handle) args.cred = acceptor_cred_handle->handle; else args.cred = 0; args.input_token = *input_token; args.input_chan_bindings = input_chan_bindings; bzero(&res, sizeof(res)); stat = gssd_accept_sec_context_1(&args, &res, cl); CLNT_RELEASE(cl); if (stat != RPC_SUCCESS) { *minor_status = stat; return (GSS_S_FAILURE); } if (res.major_status != GSS_S_COMPLETE && res.major_status != GSS_S_CONTINUE_NEEDED) { *minor_status = res.minor_status; xdr_free((xdrproc_t) xdr_accept_sec_context_res, &res); return (res.major_status); } *minor_status = res.minor_status; if (!ctx) { ctx = kgss_create_context(res.mech_type); if (!ctx) { xdr_free((xdrproc_t) xdr_accept_sec_context_res, &res); *minor_status = 0; return (GSS_S_BAD_MECH); } } *context_handle = ctx; ctx->handle = res.ctx; name = malloc(sizeof(struct _gss_name_t), M_GSSAPI, M_WAITOK); name->handle = res.src_name; if (src_name) { *src_name = name; } else { OM_uint32 junk; gss_release_name(&junk, &name); } if (mech_type) *mech_type = KGSS_MECH_TYPE(ctx); kgss_copy_buffer(&res.output_token, output_token); if (ret_flags) *ret_flags = res.ret_flags; if (time_rec) *time_rec = res.time_rec; cred = malloc(sizeof(struct _gss_cred_id_t), M_GSSAPI, M_WAITOK); cred->handle = res.delegated_cred_handle; if (delegated_cred_handle) { *delegated_cred_handle = cred; } else { OM_uint32 junk; gss_release_cred(&junk, &cred); } xdr_free((xdrproc_t) xdr_accept_sec_context_res, &res); /* * If the context establishment is complete, export it from * userland and hand the result (which includes key material * etc.) to the kernel implementation. */ if (res.major_status == GSS_S_COMPLETE) - res.major_status = kgss_transfer_context(ctx); + res.major_status = kgss_transfer_context(ctx, NULL); + + return (res.major_status); +} + +/* + * This function should be called when the gssd daemon is + * one that uses gss_krb5_export_lucid_sec_context(). + * There is a lot of code common with + * gss_accept_sec_context(). However, the structures used + * are not the same and future changes may be needed for + * this one. As such, I have not factored out the common + * code. + * gss_supports_lucid() may be used to check to see if the + * gssd daemon uses gss_krb5_export_lucid_sec_context(). 
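 * As an illustrative sketch (variable names here are arbitrary), a
 * kernel caller could probe the daemon once and remember the answer:
 *
 *	OM_uint32 maj, min;
 *	uint32_t vers = 0;
 *	bool use_lucid;
 *
 *	maj = gss_supports_lucid(&min, &vers);
 *	use_lucid = (maj == GSS_S_COMPLETE);
 *
 * and then call gss_accept_sec_context_lucid_v1() when use_lucid is
 * set, falling back to gss_accept_sec_context() for an old gssd.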
+ */ +OM_uint32 gss_accept_sec_context_lucid_v1(OM_uint32 *minor_status, + gss_ctx_id_t *context_handle, + const gss_cred_id_t acceptor_cred_handle, + const gss_buffer_t input_token, + const gss_channel_bindings_t input_chan_bindings, + gss_name_t *src_name, + gss_OID *mech_type, + gss_buffer_t output_token, + OM_uint32 *ret_flags, + OM_uint32 *time_rec, + gss_cred_id_t *delegated_cred_handle, + gss_buffer_t exported_name, + uid_t *uidp, + gid_t *gidp, + int *numgroups, + gid_t *groups) +{ + struct accept_sec_context_lucid_v1_res res; + struct accept_sec_context_lucid_v1_args args; + enum clnt_stat stat; + gss_ctx_id_t ctx = *context_handle; + gss_name_t name; + gss_cred_id_t cred; + CLIENT *cl; + + cl = kgss_gssd_client(); + if (cl == NULL) { + *minor_status = 0; + return (GSS_S_FAILURE); + } + + if (ctx) + args.ctx = ctx->handle; + else + args.ctx = 0; + if (acceptor_cred_handle) + args.cred = acceptor_cred_handle->handle; + else + args.cred = 0; + args.input_token = *input_token; + args.input_chan_bindings = input_chan_bindings; + + bzero(&res, sizeof(res)); + stat = gssd_accept_sec_context_lucid_v1_1(&args, &res, cl); + CLNT_RELEASE(cl); + if (stat != RPC_SUCCESS) { + *minor_status = stat; + return (GSS_S_FAILURE); + } + + if (res.major_status != GSS_S_COMPLETE + && res.major_status != GSS_S_CONTINUE_NEEDED) { + *minor_status = res.minor_status; + xdr_free((xdrproc_t) xdr_accept_sec_context_res, &res); + return (res.major_status); + } + + *minor_status = res.minor_status; + + if (!ctx) { + ctx = kgss_create_context(res.mech_type); + if (!ctx) { + xdr_free((xdrproc_t) xdr_accept_sec_context_res, &res); + *minor_status = 0; + return (GSS_S_BAD_MECH); + } + } + *context_handle = ctx; + + ctx->handle = res.ctx; + name = malloc(sizeof(struct _gss_name_t), M_GSSAPI, M_WAITOK); + name->handle = res.src_name; + if (src_name) { + *src_name = name; + } else { + OM_uint32 junk; + gss_release_name(&junk, &name); + } + if (mech_type) + *mech_type = KGSS_MECH_TYPE(ctx); + kgss_copy_buffer(&res.output_token, output_token); + if (ret_flags) + *ret_flags = res.ret_flags; + if (time_rec) + *time_rec = res.time_rec; + cred = malloc(sizeof(struct _gss_cred_id_t), M_GSSAPI, M_WAITOK); + cred->handle = res.delegated_cred_handle; + if (delegated_cred_handle) { + *delegated_cred_handle = cred; + } else { + OM_uint32 junk; + gss_release_cred(&junk, &cred); + } + + /* + * If the context establishment is complete, export it from + * userland and hand the result (which includes key material + * etc.) to the kernel implementation. + */ + if (res.major_status == GSS_S_COMPLETE) { + int i, n; + + /* First, get the unix credentials. */ + *uidp = res.uid; + *gidp = res.gid; + n = res.gidlist.gidlist_len; + if (n > *numgroups) + n = *numgroups; + for (i = 0; i < n; i++) + groups[i] = res.gidlist.gidlist_val[i]; + *numgroups = n; + + /* Next, get the exported_name. */ + kgss_copy_buffer(&res.exported_name, exported_name); + + /* Now, handle the lucid credential setup. 
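 * res.lucid is the kgss_lucid_desc that the gssd daemon filled in
 * via gss_krb5_export_lucid_sec_context(), so kgss_transfer_context()
 * can hand it straight to the mechanism, roughly (see gss_impl.c):
 *
 *	maj_stat = KGSS_IMPORT(ctx, MIT_V1, lctx);
 *	ctx->handle = 0;
 *
 * instead of making the extra GSSD_EXPORT_SEC_CONTEXT upcall that the
 * non-lucid path requires.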
*/ + res.major_status = kgss_transfer_context(ctx, &res.lucid); + if (res.major_status != GSS_S_COMPLETE) + printf("gss_accept_sec_context_lucid_v1: " + "transfer failed\n"); + } + + xdr_free((xdrproc_t) xdr_accept_sec_context_res, &res); return (res.major_status); } diff --git a/sys/kgssapi/gss_impl.c b/sys/kgssapi/gss_impl.c index e2569bea61f9..c9cd4d880695 100644 --- a/sys/kgssapi/gss_impl.c +++ b/sys/kgssapi/gss_impl.c @@ -1,315 +1,321 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gssd.h" #include "kgss_if.h" MALLOC_DEFINE(M_GSSAPI, "GSS-API", "GSS-API"); struct kgss_mech_list kgss_mechs; struct mtx kgss_gssd_lock; KGSS_VNET_DEFINE(CLIENT *, kgss_gssd_handle) = NULL; static int kgss_load(void) { CLIENT *cl; LIST_INIT(&kgss_mechs); cl = client_nl_create("kgss", GSSD, GSSDVERS); KASSERT(cl, ("%s: netlink client already exist", __func__)); /* * The transport default is no retries at all, since there could * be no userland listener to our messages. We will retry for 5 * minutes with 10 second interval. This will potentially cure hosts * with misconfigured startup, where kernel starts sending GSS queries * before userland had started up the gssd(8) daemon. */ clnt_control(cl, CLSET_RETRIES, &(int){30}); clnt_control(cl, CLSET_TIMEOUT, &(struct timeval){.tv_sec = 300}); /* * We literally wait on gssd(8), let's see that in top(1). 
*/ clnt_control(cl, CLSET_WAITCHAN, "gssd"); KGSS_CURVNET_SET_QUIET(KGSS_TD_TO_VNET(curthread)); mtx_lock(&kgss_gssd_lock); KGSS_VNET(kgss_gssd_handle) = cl; mtx_unlock(&kgss_gssd_lock); KGSS_CURVNET_RESTORE(); return (0); } static void kgss_unload(void) { KGSS_CURVNET_SET_QUIET(KGSS_TD_TO_VNET(curthread)); clnt_destroy(KGSS_VNET(kgss_gssd_handle)); KGSS_CURVNET_RESTORE(); } int kgss_oid_equal(const gss_OID oid1, const gss_OID oid2) { if (oid1 == oid2) return (1); if (!oid1 || !oid2) return (0); if (oid1->length != oid2->length) return (0); if (memcmp(oid1->elements, oid2->elements, oid1->length)) return (0); return (1); } void kgss_install_mech(gss_OID mech_type, const char *name, struct kobj_class *cls) { struct kgss_mech *km; km = malloc(sizeof(struct kgss_mech), M_GSSAPI, M_WAITOK); km->km_mech_type = mech_type; km->km_mech_name = name; km->km_class = cls; LIST_INSERT_HEAD(&kgss_mechs, km, km_link); } void kgss_uninstall_mech(gss_OID mech_type) { struct kgss_mech *km; LIST_FOREACH(km, &kgss_mechs, km_link) { if (kgss_oid_equal(km->km_mech_type, mech_type)) { LIST_REMOVE(km, km_link); free(km, M_GSSAPI); return; } } } gss_OID kgss_find_mech_by_name(const char *name) { struct kgss_mech *km; LIST_FOREACH(km, &kgss_mechs, km_link) { if (!strcmp(km->km_mech_name, name)) { return (km->km_mech_type); } } return (GSS_C_NO_OID); } const char * kgss_find_mech_by_oid(const gss_OID oid) { struct kgss_mech *km; LIST_FOREACH(km, &kgss_mechs, km_link) { if (kgss_oid_equal(km->km_mech_type, oid)) { return (km->km_mech_name); } } return (NULL); } gss_ctx_id_t kgss_create_context(gss_OID mech_type) { struct kgss_mech *km; gss_ctx_id_t ctx; LIST_FOREACH(km, &kgss_mechs, km_link) { if (kgss_oid_equal(km->km_mech_type, mech_type)) break; } if (!km) return (NULL); ctx = (gss_ctx_id_t) kobj_create(km->km_class, M_GSSAPI, M_WAITOK); KGSS_INIT(ctx); return (ctx); } void kgss_delete_context(gss_ctx_id_t ctx, gss_buffer_t output_token) { KGSS_DELETE(ctx, output_token); kobj_delete((kobj_t) ctx, M_GSSAPI); } OM_uint32 -kgss_transfer_context(gss_ctx_id_t ctx) +kgss_transfer_context(gss_ctx_id_t ctx, void *lctx) { struct export_sec_context_res res; struct export_sec_context_args args; enum clnt_stat stat; OM_uint32 maj_stat; + if (lctx != NULL) { + maj_stat = KGSS_IMPORT(ctx, MIT_V1, lctx); + ctx->handle = 0; + return (maj_stat); + } + KGSS_CURVNET_SET_QUIET(KGSS_TD_TO_VNET(curthread)); if (!KGSS_VNET(kgss_gssd_handle)) { KGSS_CURVNET_RESTORE(); return (GSS_S_FAILURE); } args.ctx = ctx->handle; bzero(&res, sizeof(res)); stat = gssd_export_sec_context_1(&args, &res, KGSS_VNET(kgss_gssd_handle)); KGSS_CURVNET_RESTORE(); if (stat != RPC_SUCCESS) { return (GSS_S_FAILURE); } maj_stat = KGSS_IMPORT(ctx, res.format, &res.interprocess_token); ctx->handle = 0; xdr_free((xdrproc_t) xdr_export_sec_context_res, &res); return (maj_stat); } void kgss_copy_buffer(const gss_buffer_t from, gss_buffer_t to) { to->length = from->length; if (from->length) { to->value = malloc(from->length, M_GSSAPI, M_WAITOK); bcopy(from->value, to->value, from->length); } else { to->value = NULL; } } /* * Acquire the kgss_gssd_handle and return it with a reference count, * if it is available. 
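 * Callers pair this with CLNT_RELEASE() around each upcall, in the
 * pattern used by gss_init_sec_context() and friends, e.g. (sketch):
 *
 *	cl = kgss_gssd_client();
 *	if (cl == NULL)
 *		return (GSS_S_FAILURE);
 *	stat = gssd_init_sec_context_1(&args, &res, cl);
 *	CLNT_RELEASE(cl);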
*/ CLIENT * kgss_gssd_client(void) { CLIENT *cl; KGSS_CURVNET_SET_QUIET(KGSS_TD_TO_VNET(curthread)); mtx_lock(&kgss_gssd_lock); cl = KGSS_VNET(kgss_gssd_handle); if (cl != NULL) CLNT_ACQUIRE(cl); mtx_unlock(&kgss_gssd_lock); KGSS_CURVNET_RESTORE(); return (cl); } /* * Kernel module glue */ static int kgssapi_modevent(module_t mod, int type, void *data) { int error = 0; switch (type) { case MOD_LOAD: rpc_gss_entries.rpc_gss_refresh_auth = rpc_gss_refresh_auth; rpc_gss_entries.rpc_gss_secfind = rpc_gss_secfind; rpc_gss_entries.rpc_gss_secpurge = rpc_gss_secpurge; rpc_gss_entries.rpc_gss_seccreate = rpc_gss_seccreate; rpc_gss_entries.rpc_gss_set_defaults = rpc_gss_set_defaults; rpc_gss_entries.rpc_gss_max_data_length = rpc_gss_max_data_length; rpc_gss_entries.rpc_gss_get_error = rpc_gss_get_error; rpc_gss_entries.rpc_gss_mech_to_oid = rpc_gss_mech_to_oid; rpc_gss_entries.rpc_gss_oid_to_mech = rpc_gss_oid_to_mech; rpc_gss_entries.rpc_gss_qop_to_num = rpc_gss_qop_to_num; rpc_gss_entries.rpc_gss_get_mechanisms = rpc_gss_get_mechanisms; rpc_gss_entries.rpc_gss_get_versions = rpc_gss_get_versions; rpc_gss_entries.rpc_gss_is_installed = rpc_gss_is_installed; rpc_gss_entries.rpc_gss_set_svc_name = rpc_gss_set_svc_name; rpc_gss_entries.rpc_gss_clear_svc_name = rpc_gss_clear_svc_name; rpc_gss_entries.rpc_gss_getcred = rpc_gss_getcred; rpc_gss_entries.rpc_gss_set_callback = rpc_gss_set_callback; rpc_gss_entries.rpc_gss_clear_callback = rpc_gss_clear_callback; rpc_gss_entries.rpc_gss_get_principal_name = rpc_gss_get_principal_name; rpc_gss_entries.rpc_gss_svc_max_data_length = rpc_gss_svc_max_data_length; rpc_gss_entries.rpc_gss_ip_to_srv_principal = rpc_gss_ip_to_srv_principal; mtx_init(&kgss_gssd_lock, "kgss_gssd_lock", NULL, MTX_DEF); error = kgss_load(); break; case MOD_UNLOAD: kgss_unload(); mtx_destroy(&kgss_gssd_lock); /* * Unloading of the kgssapi module is not currently supported. * If somebody wants this, we would need to keep track of * currently executing threads and make sure the count is 0. */ /* FALLTHROUGH */ default: error = EOPNOTSUPP; } return (error); } static moduledata_t kgssapi_mod = { "kgssapi", kgssapi_modevent, NULL, }; DECLARE_MODULE(kgssapi, kgssapi_mod, SI_SUB_VFS, SI_ORDER_SECOND); MODULE_DEPEND(kgssapi, xdr, 1, 1, 1); MODULE_DEPEND(kgssapi, krpc, 1, 1, 1); MODULE_VERSION(kgssapi, 1); diff --git a/sys/kgssapi/gss_init_sec_context.c b/sys/kgssapi/gss_init_sec_context.c index fa0d3fb2ae19..a0f48fda8b29 100644 --- a/sys/kgssapi/gss_init_sec_context.c +++ b/sys/kgssapi/gss_init_sec_context.c @@ -1,139 +1,282 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include "gssd.h" #include "kgss_if.h" +/* + * This function should only be called when the gssd + * daemon running on the system is an old one that + * does not use gss_krb5_export_lucid_sec_context(). + */ OM_uint32 gss_init_sec_context(OM_uint32 * minor_status, const gss_cred_id_t initiator_cred_handle, gss_ctx_id_t * context_handle, const gss_name_t target_name, const gss_OID input_mech_type, OM_uint32 req_flags, OM_uint32 time_req, const gss_channel_bindings_t input_chan_bindings, const gss_buffer_t input_token, gss_OID * actual_mech_type, gss_buffer_t output_token, OM_uint32 * ret_flags, OM_uint32 * time_rec) { struct init_sec_context_res res; struct init_sec_context_args args; enum clnt_stat stat; gss_ctx_id_t ctx = *context_handle; CLIENT *cl; *minor_status = 0; cl = kgss_gssd_client(); if (cl == NULL) return (GSS_S_FAILURE); args.uid = curthread->td_ucred->cr_uid; if (initiator_cred_handle) args.cred = initiator_cred_handle->handle; else args.cred = 0; if (ctx) args.ctx = ctx->handle; else args.ctx = 0; args.name = target_name->handle; args.mech_type = input_mech_type; args.req_flags = req_flags; args.time_req = time_req; args.input_chan_bindings = input_chan_bindings; if (input_token) args.input_token = *input_token; else { args.input_token.length = 0; args.input_token.value = NULL; } bzero(&res, sizeof(res)); stat = gssd_init_sec_context_1(&args, &res, cl); CLNT_RELEASE(cl); if (stat != RPC_SUCCESS) { *minor_status = stat; return (GSS_S_FAILURE); } if (res.major_status != GSS_S_COMPLETE && res.major_status != GSS_S_CONTINUE_NEEDED) { *minor_status = res.minor_status; xdr_free((xdrproc_t) xdr_init_sec_context_res, &res); return (res.major_status); } *minor_status = res.minor_status; if (!ctx) { ctx = kgss_create_context(res.actual_mech_type); if (!ctx) { xdr_free((xdrproc_t) xdr_init_sec_context_res, &res); *minor_status = 0; return (GSS_S_BAD_MECH); } } *context_handle = ctx; ctx->handle = res.ctx; if (actual_mech_type) *actual_mech_type = KGSS_MECH_TYPE(ctx); kgss_copy_buffer(&res.output_token, output_token); if (ret_flags) *ret_flags = res.ret_flags; if (time_rec) *time_rec = res.time_rec; xdr_free((xdrproc_t) xdr_init_sec_context_res, &res); /* * If the context establishment is complete, export it from * userland and hand the result (which includes key material * etc.) to the kernel implementation. 
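 * In this legacy path the export means a second upcall,
 * GSSD_EXPORT_SEC_CONTEXT; kgss_transfer_context(ctx, NULL) then feeds
 * the returned interprocess token to the mechanism, roughly:
 *
 *	stat = gssd_export_sec_context_1(&args, &res,
 *	    KGSS_VNET(kgss_gssd_handle));
 *	maj_stat = KGSS_IMPORT(ctx, res.format, &res.interprocess_token);
 *	ctx->handle = 0;
 *
 * (see kgss_transfer_context() in gss_impl.c).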
*/ if (res.major_status == GSS_S_COMPLETE) - res.major_status = kgss_transfer_context(ctx); + res.major_status = kgss_transfer_context(ctx, NULL); + + return (res.major_status); +} + +OM_uint32 +gss_supports_lucid(uint32_t *minor_status, uint32_t *vers) +{ + struct supports_lucid_res res; + enum clnt_stat stat; + CLIENT *cl; + + *minor_status = 0; + + cl = kgss_gssd_client(); + if (cl == NULL) + return (GSS_S_FAILURE); + + bzero(&res, sizeof(res)); + stat = gssd_supports_lucid_1(NULL, &res, cl); + CLNT_RELEASE(cl); + if (stat != RPC_SUCCESS) { + *minor_status = stat; + return (GSS_S_FAILURE); + } + + if (vers) + *vers = res.vers; + + return (res.major_status); +} + +/* + * This function should be called when the gssd daemon is + * one that uses gss_krb5_export_lucid_sec_context(). + * There is a lot of code common with + * gss_init_sec_context(). However, the structures used + * are not the same and future changes may be needed for + * this one. As such, I have not factored out the common + * code. + * gss_supports_lucid() may be used to check to see if the + * gssd daemon uses gss_krb5_export_lucid_sec_context(). + */ +OM_uint32 +gss_init_sec_context_lucid_v1(OM_uint32 * minor_status, + const gss_cred_id_t initiator_cred_handle, + gss_ctx_id_t * context_handle, + const gss_name_t target_name, + const gss_OID input_mech_type, + OM_uint32 req_flags, + OM_uint32 time_req, + const gss_channel_bindings_t input_chan_bindings, + const gss_buffer_t input_token, + gss_OID * actual_mech_type, + gss_buffer_t output_token, + OM_uint32 * ret_flags, + OM_uint32 * time_rec) +{ + struct init_sec_context_lucid_v1_res res; + struct init_sec_context_lucid_v1_args args; + enum clnt_stat stat; + gss_ctx_id_t ctx = *context_handle; + CLIENT *cl; + + *minor_status = 0; + + cl = kgss_gssd_client(); + if (cl == NULL) + return (GSS_S_FAILURE); + + args.uid = curthread->td_ucred->cr_uid; + if (initiator_cred_handle) + args.cred = initiator_cred_handle->handle; + else + args.cred = 0; + if (ctx) + args.ctx = ctx->handle; + else + args.ctx = 0; + args.name = target_name->handle; + args.mech_type = input_mech_type; + args.req_flags = req_flags; + args.time_req = time_req; + args.input_chan_bindings = input_chan_bindings; + if (input_token) + args.input_token = *input_token; + else { + args.input_token.length = 0; + args.input_token.value = NULL; + } + + bzero(&res, sizeof(res)); + stat = gssd_init_sec_context_lucid_v1_1(&args, &res, cl); + CLNT_RELEASE(cl); + if (stat != RPC_SUCCESS) { + *minor_status = stat; + return (GSS_S_FAILURE); + } + + if (res.major_status != GSS_S_COMPLETE + && res.major_status != GSS_S_CONTINUE_NEEDED) { + *minor_status = res.minor_status; + xdr_free((xdrproc_t) xdr_init_sec_context_lucid_v1_res, &res); + return (res.major_status); + } + + *minor_status = res.minor_status; + + if (!ctx) { + ctx = kgss_create_context(res.actual_mech_type); + if (!ctx) { + xdr_free((xdrproc_t) xdr_init_sec_context_lucid_v1_res, &res); + *minor_status = 0; + return (GSS_S_BAD_MECH); + } + } + *context_handle = ctx; + ctx->handle = res.ctx; + if (actual_mech_type) + *actual_mech_type = KGSS_MECH_TYPE(ctx); + kgss_copy_buffer(&res.output_token, output_token); + if (ret_flags) + *ret_flags = res.ret_flags; + if (time_rec) + *time_rec = res.time_rec; + + /* + * If the context establishment is complete, export it from + * userland and hand the result (which includes key material + * etc.) to the kernel implementation. 
+ */ + if (res.major_status == GSS_S_COMPLETE) { + res.major_status = kgss_transfer_context(ctx, &res.lucid); + if (res.major_status != GSS_S_COMPLETE) + printf("gss_init_sec_context_lucid_v1: " + "transfer failed\n"); + } + + xdr_free((xdrproc_t) xdr_init_sec_context_lucid_v1_res, &res); return (res.major_status); } diff --git a/sys/kgssapi/gssapi.h b/sys/kgssapi/gssapi.h index 37cc8a1a5a09..cd4a4b508cc5 100644 --- a/sys/kgssapi/gssapi.h +++ b/sys/kgssapi/gssapi.h @@ -1,635 +1,677 @@ /* * Copyright (C) The Internet Society (2000). All Rights Reserved. * * This document and translations of it may be copied and furnished to * others, and derivative works that comment on or otherwise explain it * or assist in its implementation may be prepared, copied, published * and distributed, in whole or in part, without restriction of any * kind, provided that the above copyright notice and this paragraph are * included on all such copies and derivative works. However, this * document itself may not be modified in any way, such as by removing * the copyright notice or references to the Internet Society or other * Internet organizations, except as needed for the purpose of * developing Internet standards in which case the procedures for * copyrights defined in the Internet Standards process must be * followed, or as required to translate it into languages other than * English. * * The limited permissions granted above are perpetual and will not be * revoked by the Internet Society or its successors or assigns. * * This document and the information contained herein is provided on an * "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING * TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION * HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. */ #ifndef _KGSSAPI_GSSAPI_H_ #define _KGSSAPI_GSSAPI_H_ /* * A cut-down version of the GSS-API for in-kernel use */ /* * Now define the three implementation-dependent types. */ typedef struct _gss_ctx_id_t *gss_ctx_id_t; typedef struct _gss_cred_id_t *gss_cred_id_t; typedef struct _gss_name_t *gss_name_t; /* * We can't use X/Open definitions, so roll our own. */ typedef uint32_t OM_uint32; typedef uint64_t OM_uint64; typedef struct gss_OID_desc_struct { OM_uint32 length; void *elements; } gss_OID_desc, *gss_OID; typedef struct gss_OID_set_desc_struct { size_t count; gss_OID elements; } gss_OID_set_desc, *gss_OID_set; typedef struct gss_buffer_desc_struct { size_t length; void *value; } gss_buffer_desc, *gss_buffer_t; typedef struct gss_channel_bindings_struct { OM_uint32 initiator_addrtype; gss_buffer_desc initiator_address; OM_uint32 acceptor_addrtype; gss_buffer_desc acceptor_address; gss_buffer_desc application_data; } *gss_channel_bindings_t; /* * For now, define a QOP-type as an OM_uint32 */ typedef OM_uint32 gss_qop_t; typedef int gss_cred_usage_t; /* * Flag bits for context-level services. 
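 * These are requested via the req_flags argument of
 * gss_init_sec_context() and reported back in ret_flags by both
 * gss_init_sec_context() and gss_accept_sec_context().  An
 * illustrative check on the result (names are arbitrary):
 *
 *	if ((ret_flags & (GSS_C_MUTUAL_FLAG | GSS_C_INTEG_FLAG)) !=
 *	    (GSS_C_MUTUAL_FLAG | GSS_C_INTEG_FLAG))
 *		return (GSS_S_FAILURE);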
*/ #define GSS_C_DELEG_FLAG 1 #define GSS_C_MUTUAL_FLAG 2 #define GSS_C_REPLAY_FLAG 4 #define GSS_C_SEQUENCE_FLAG 8 #define GSS_C_CONF_FLAG 16 #define GSS_C_INTEG_FLAG 32 #define GSS_C_ANON_FLAG 64 #define GSS_C_PROT_READY_FLAG 128 #define GSS_C_TRANS_FLAG 256 /* * Credential usage options */ #define GSS_C_BOTH 0 #define GSS_C_INITIATE 1 #define GSS_C_ACCEPT 2 /* * Status code types for gss_display_status */ #define GSS_C_GSS_CODE 1 #define GSS_C_MECH_CODE 2 /* * The constant definitions for channel-bindings address families */ #define GSS_C_AF_UNSPEC 0 #define GSS_C_AF_LOCAL 1 #define GSS_C_AF_INET 2 #define GSS_C_AF_IMPLINK 3 #define GSS_C_AF_PUP 4 #define GSS_C_AF_CHAOS 5 #define GSS_C_AF_NS 6 #define GSS_C_AF_NBS 7 #define GSS_C_AF_ECMA 8 #define GSS_C_AF_DATAKIT 9 #define GSS_C_AF_CCITT 10 #define GSS_C_AF_SNA 11 #define GSS_C_AF_DECnet 12 #define GSS_C_AF_DLI 13 #define GSS_C_AF_LAT 14 #define GSS_C_AF_HYLINK 15 #define GSS_C_AF_APPLETALK 16 #define GSS_C_AF_BSC 17 #define GSS_C_AF_DSS 18 #define GSS_C_AF_OSI 19 #define GSS_C_AF_X25 21 #define GSS_C_AF_NULLADDR 255 /* * Various Null values */ #define GSS_C_NO_NAME ((gss_name_t) 0) #define GSS_C_NO_BUFFER ((gss_buffer_t) 0) #define GSS_C_NO_OID ((gss_OID) 0) #define GSS_C_NO_OID_SET ((gss_OID_set) 0) #define GSS_C_NO_CONTEXT ((gss_ctx_id_t) 0) #define GSS_C_NO_CREDENTIAL ((gss_cred_id_t) 0) #define GSS_C_NO_CHANNEL_BINDINGS ((gss_channel_bindings_t) 0) #define GSS_C_EMPTY_BUFFER {0, NULL} /* * Some alternate names for a couple of the above * values. These are defined for V1 compatibility. */ #define GSS_C_NULL_OID GSS_C_NO_OID #define GSS_C_NULL_OID_SET GSS_C_NO_OID_SET /* * Define the default Quality of Protection for per-message * services. Note that an implementation that offers multiple * levels of QOP may define GSS_C_QOP_DEFAULT to be either zero * (as done here) to mean "default protection", or to a specific * explicit QOP value. However, a value of 0 should always be * interpreted by a GSS-API implementation as a request for the * default protection level. */ #define GSS_C_QOP_DEFAULT 0 /* * Expiration time of 2^32-1 seconds means infinite lifetime for a * credential or security context */ #define GSS_C_INDEFINITE 0xfffffffful /* * The implementation must reserve static storage for a * gss_OID_desc object containing the value * {10, (void *)"\x2a\x86\x48\x86\xf7\x12" * "\x01\x02\x01\x01"}, * corresponding to an object-identifier value of * {iso(1) member-body(2) United States(840) mit(113554) * infosys(1) gssapi(2) generic(1) user_name(1)}. The constant * GSS_C_NT_USER_NAME should be initialized to point * to that gss_OID_desc. */ extern gss_OID GSS_C_NT_USER_NAME; /* * The implementation must reserve static storage for a * gss_OID_desc object containing the value * {10, (void *)"\x2a\x86\x48\x86\xf7\x12" * "\x01\x02\x01\x02"}, * corresponding to an object-identifier value of * {iso(1) member-body(2) United States(840) mit(113554) * infosys(1) gssapi(2) generic(1) machine_uid_name(2)}. * The constant GSS_C_NT_MACHINE_UID_NAME should be * initialized to point to that gss_OID_desc. */ extern gss_OID GSS_C_NT_MACHINE_UID_NAME; /* * The implementation must reserve static storage for a * gss_OID_desc object containing the value * {10, (void *)"\x2a\x86\x48\x86\xf7\x12" * "\x01\x02\x01\x03"}, * corresponding to an object-identifier value of * {iso(1) member-body(2) United States(840) mit(113554) * infosys(1) gssapi(2) generic(1) string_uid_name(3)}. 
* The constant GSS_C_NT_STRING_UID_NAME should be * initialized to point to that gss_OID_desc. */ extern gss_OID GSS_C_NT_STRING_UID_NAME; /* * The implementation must reserve static storage for a * gss_OID_desc object containing the value * {6, (void *)"\x2b\x06\x01\x05\x06\x02"}, * corresponding to an object-identifier value of * {iso(1) org(3) dod(6) internet(1) security(5) * nametypes(6) gss-host-based-services(2)). The constant * GSS_C_NT_HOSTBASED_SERVICE_X should be initialized to point * to that gss_OID_desc. This is a deprecated OID value, and * implementations wishing to support hostbased-service names * should instead use the GSS_C_NT_HOSTBASED_SERVICE OID, * defined below, to identify such names; * GSS_C_NT_HOSTBASED_SERVICE_X should be accepted a synonym * for GSS_C_NT_HOSTBASED_SERVICE when presented as an input * parameter, but should not be emitted by GSS-API * implementations */ extern gss_OID GSS_C_NT_HOSTBASED_SERVICE_X; /* * The implementation must reserve static storage for a * gss_OID_desc object containing the value * {10, (void *)"\x2a\x86\x48\x86\xf7\x12" * "\x01\x02\x01\x04"}, corresponding to an * object-identifier value of {iso(1) member-body(2) * Unites States(840) mit(113554) infosys(1) gssapi(2) * generic(1) service_name(4)}. The constant * GSS_C_NT_HOSTBASED_SERVICE should be initialized * to point to that gss_OID_desc. */ extern gss_OID GSS_C_NT_HOSTBASED_SERVICE; /* * The implementation must reserve static storage for a * gss_OID_desc object containing the value * {6, (void *)"\x2b\x06\01\x05\x06\x03"}, * corresponding to an object identifier value of * {1(iso), 3(org), 6(dod), 1(internet), 5(security), * 6(nametypes), 3(gss-anonymous-name)}. The constant * and GSS_C_NT_ANONYMOUS should be initialized to point * to that gss_OID_desc. */ extern gss_OID GSS_C_NT_ANONYMOUS; /* * The implementation must reserve static storage for a * gss_OID_desc object containing the value * {6, (void *)"\x2b\x06\x01\x05\x06\x04"}, * corresponding to an object-identifier value of * {1(iso), 3(org), 6(dod), 1(internet), 5(security), * 6(nametypes), 4(gss-api-exported-name)}. The constant * GSS_C_NT_EXPORT_NAME should be initialized to point * to that gss_OID_desc. */ extern gss_OID GSS_C_NT_EXPORT_NAME; /* * This name form shall be represented by the Object Identifier {iso(1) * member-body(2) United States(840) mit(113554) infosys(1) gssapi(2) * krb5(2) krb5_name(1)}. The recommended symbolic name for this type * is "GSS_KRB5_NT_PRINCIPAL_NAME". */ extern gss_OID GSS_KRB5_NT_PRINCIPAL_NAME; /* * This name form shall be represented by the Object Identifier {iso(1) * member-body(2) United States(840) mit(113554) infosys(1) gssapi(2) * generic(1) user_name(1)}. The recommended symbolic name for this * type is "GSS_KRB5_NT_USER_NAME". */ extern gss_OID GSS_KRB5_NT_USER_NAME; /* * This name form shall be represented by the Object Identifier {iso(1) * member-body(2) United States(840) mit(113554) infosys(1) gssapi(2) * generic(1) machine_uid_name(2)}. The recommended symbolic name for * this type is "GSS_KRB5_NT_MACHINE_UID_NAME". */ extern gss_OID GSS_KRB5_NT_MACHINE_UID_NAME; /* * This name form shall be represented by the Object Identifier {iso(1) * member-body(2) United States(840) mit(113554) infosys(1) gssapi(2) * generic(1) string_uid_name(3)}. The recommended symbolic name for * this type is "GSS_KRB5_NT_STRING_UID_NAME". 
*/ extern gss_OID GSS_KRB5_NT_STRING_UID_NAME; /* Major status codes */ #define GSS_S_COMPLETE 0 /* * Some "helper" definitions to make the status code macros obvious. */ #define GSS_C_CALLING_ERROR_OFFSET 24 #define GSS_C_ROUTINE_ERROR_OFFSET 16 #define GSS_C_SUPPLEMENTARY_OFFSET 0 #define GSS_C_CALLING_ERROR_MASK 0377ul #define GSS_C_ROUTINE_ERROR_MASK 0377ul #define GSS_C_SUPPLEMENTARY_MASK 0177777ul /* * The macros that test status codes for error conditions. * Note that the GSS_ERROR() macro has changed slightly from * the V1 GSS-API so that it now evaluates its argument * only once. */ #define GSS_CALLING_ERROR(x) \ (x & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET)) #define GSS_ROUTINE_ERROR(x) \ (x & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)) #define GSS_SUPPLEMENTARY_INFO(x) \ (x & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET)) #define GSS_ERROR(x) \ (x & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \ (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))) /* * Now the actual status code definitions */ /* * Calling errors: */ #define GSS_S_CALL_INACCESSIBLE_READ \ (1ul << GSS_C_CALLING_ERROR_OFFSET) #define GSS_S_CALL_INACCESSIBLE_WRITE \ (2ul << GSS_C_CALLING_ERROR_OFFSET) #define GSS_S_CALL_BAD_STRUCTURE \ (3ul << GSS_C_CALLING_ERROR_OFFSET) /* * Routine errors: */ #define GSS_S_BAD_MECH (1ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_NAME (2ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_NAMETYPE (3ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_BINDINGS (4ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_STATUS (5ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_SIG (6ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_MIC GSS_S_BAD_SIG #define GSS_S_NO_CRED (7ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_NO_CONTEXT (8ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_DEFECTIVE_TOKEN (9ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_DEFECTIVE_CREDENTIAL (10ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_CREDENTIALS_EXPIRED (11ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_CONTEXT_EXPIRED (12ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_FAILURE (13ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_QOP (14ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_UNAUTHORIZED (15ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_UNAVAILABLE (16ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_DUPLICATE_ELEMENT (17ul << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_NAME_NOT_MN (18ul << GSS_C_ROUTINE_ERROR_OFFSET) /* * Supplementary info bits: */ #define GSS_S_CONTINUE_NEEDED \ (1ul << (GSS_C_SUPPLEMENTARY_OFFSET + 0)) #define GSS_S_DUPLICATE_TOKEN \ (1ul << (GSS_C_SUPPLEMENTARY_OFFSET + 1)) #define GSS_S_OLD_TOKEN \ (1ul << (GSS_C_SUPPLEMENTARY_OFFSET + 2)) #define GSS_S_UNSEQ_TOKEN \ (1ul << (GSS_C_SUPPLEMENTARY_OFFSET + 3)) #define GSS_S_GAP_TOKEN \ (1ul << (GSS_C_SUPPLEMENTARY_OFFSET + 4)) /* * NI_MAXSERV and NI_MAXHOST. The srv_principal argument for * rpc_gss_ip_to_srv_principal should point to at least * NI_MAXSERV + NI_MAXHOST + 1 bytes of storage. The "+ 1" is for the '@'. * The NI_MAXHOST limit is checked for gss_ip_to_dns(). * These should be set to the same value as they are in . */ #ifndef NI_MAXHOST #define NI_MAXSERV 32 #define NI_MAXHOST 1025 #endif __BEGIN_DECLS /* * Finally, function prototypes for the GSS-API routines. 
*/ OM_uint32 gss_acquire_cred (OM_uint32 *, /* minor_status */ const gss_name_t, /* desired_name */ OM_uint32, /* time_req */ const gss_OID_set, /* desired_mechs */ gss_cred_usage_t, /* cred_usage */ gss_cred_id_t *, /* output_cred_handle */ gss_OID_set *, /* actual_mechs */ OM_uint32 * /* time_rec */ ); OM_uint32 gss_release_cred (OM_uint32 *, /* minor_status */ gss_cred_id_t * /* cred_handle */ ); OM_uint32 gss_init_sec_context (OM_uint32 *, /* minor_status */ const gss_cred_id_t, /* initiator_cred_handle */ gss_ctx_id_t *, /* context_handle */ const gss_name_t, /* target_name */ const gss_OID, /* mech_type */ OM_uint32, /* req_flags */ OM_uint32, /* time_req */ const gss_channel_bindings_t, /* input_chan_bindings */ const gss_buffer_t, /* input_token */ gss_OID *, /* actual_mech_type */ gss_buffer_t, /* output_token */ OM_uint32 *, /* ret_flags */ OM_uint32 * /* time_rec */ ); +OM_uint32 gss_init_sec_context_lucid_v1 + (OM_uint32 *, /* minor_status */ + const gss_cred_id_t, /* initiator_cred_handle */ + gss_ctx_id_t *, /* context_handle */ + const gss_name_t, /* target_name */ + const gss_OID, /* mech_type */ + OM_uint32, /* req_flags */ + OM_uint32, /* time_req */ + const gss_channel_bindings_t, + /* input_chan_bindings */ + const gss_buffer_t, /* input_token */ + gss_OID *, /* actual_mech_type */ + gss_buffer_t, /* output_token */ + OM_uint32 *, /* ret_flags */ + OM_uint32 * /* time_rec */ + ); + +OM_uint32 gss_supports_lucid + (OM_uint32 *, /* minor_status */ + OM_uint32 * /* vers */ + ); + OM_uint32 gss_accept_sec_context (OM_uint32 *, /* minor_status */ gss_ctx_id_t *, /* context_handle */ const gss_cred_id_t, /* acceptor_cred_handle */ const gss_buffer_t, /* input_token_buffer */ const gss_channel_bindings_t, /* input_chan_bindings */ gss_name_t *, /* src_name */ gss_OID *, /* mech_type */ gss_buffer_t, /* output_token */ OM_uint32 *, /* ret_flags */ OM_uint32 *, /* time_rec */ gss_cred_id_t * /* delegated_cred_handle */ ); +OM_uint32 gss_accept_sec_context_lucid_v1 + (OM_uint32 *, /* minor_status */ + gss_ctx_id_t *, /* context_handle */ + const gss_cred_id_t, /* acceptor_cred_handle */ + const gss_buffer_t, /* input_token_buffer */ + const gss_channel_bindings_t, + /* input_chan_bindings */ + gss_name_t *, /* src_name */ + gss_OID *, /* mech_type */ + gss_buffer_t, /* output_token */ + OM_uint32 *, /* ret_flags */ + OM_uint32 *, /* time_rec */ + gss_cred_id_t *, /* delegated_cred_handle */ + gss_buffer_t, /* exported_name */ + uid_t *, /* Unix cred */ + gid_t *, + int *, /* Number of groups */ + gid_t * /* groups list */ + ); + OM_uint32 gss_delete_sec_context (OM_uint32 *, /* minor_status */ gss_ctx_id_t *, /* context_handle */ gss_buffer_t /* output_token */ ); OM_uint32 gss_get_mic (OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ gss_qop_t, /* qop_req */ const gss_buffer_t, /* message_buffer */ gss_buffer_t /* message_token */ ); OM_uint32 gss_verify_mic (OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ const gss_buffer_t, /* message_buffer */ const gss_buffer_t, /* token_buffer */ gss_qop_t * /* qop_state */ ); OM_uint32 gss_wrap (OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ int, /* conf_req_flag */ gss_qop_t, /* qop_req */ const gss_buffer_t, /* input_message_buffer */ int *, /* conf_state */ gss_buffer_t /* output_message_buffer */ ); OM_uint32 gss_unwrap (OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ const gss_buffer_t, /* input_message_buffer */ gss_buffer_t, /* 
output_message_buffer */ int *, /* conf_state */ gss_qop_t * /* qop_state */ ); OM_uint32 gss_display_status (OM_uint32 *, /* minor_status */ OM_uint32, /* status_value */ int, /* status_type */ const gss_OID, /* mech_type */ OM_uint32 *, /* message_context */ gss_buffer_t /* status_string */ ); OM_uint32 gss_import_name (OM_uint32 *, /* minor_status */ const gss_buffer_t, /* input_name_buffer */ const gss_OID, /* input_name_type */ gss_name_t * /* output_name */ ); OM_uint32 gss_export_name (OM_uint32 *, /* minor_status */ const gss_name_t, /* input_name */ gss_buffer_t /* exported_name */ ); OM_uint32 gss_release_name (OM_uint32 *, /* minor_status */ gss_name_t * /* input_name */ ); OM_uint32 gss_release_buffer (OM_uint32 *, /* minor_status */ gss_buffer_t /* buffer */ ); OM_uint32 gss_release_oid_set (OM_uint32 *, /* minor_status */ gss_OID_set * /* set */ ); OM_uint32 gss_wrap_size_limit ( OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ int, /* conf_req_flag */ gss_qop_t, /* qop_req */ OM_uint32, /* req_output_size */ OM_uint32 * /* max_input_size */ ); OM_uint32 gss_create_empty_oid_set ( OM_uint32 *, /* minor_status */ gss_OID_set * /* oid_set */ ); OM_uint32 gss_add_oid_set_member ( OM_uint32 *, /* minor_status */ const gss_OID, /* member_oid */ gss_OID_set * /* oid_set */ ); OM_uint32 gss_test_oid_set_member ( OM_uint32 *, /* minor_status */ const gss_OID, /* member */ const gss_OID_set, /* set */ int * /* present */ ); OM_uint32 gss_canonicalize_name ( OM_uint32 *, /* minor_status */ const gss_name_t, /* input_name */ const gss_OID, /* mech_type */ gss_name_t * /* output_name */ ); /* * Other extensions and helper functions. */ OM_uint32 gss_set_cred_option (OM_uint32 *, /* minor status */ gss_cred_id_t *, /* cred */ const gss_OID, /* option to set */ const gss_buffer_t /* option value */ ); OM_uint32 gss_pname_to_uid (OM_uint32 *, /* minor status */ const gss_name_t pname, /* principal name */ const gss_OID mech, /* mechanism to query */ uid_t *uidp /* pointer to UID for result */ ); /* * On entry, *numgroups is set to the maximum number of groups to return. On exit, *numgroups is set to the actual number of groups returned. */ OM_uint32 gss_pname_to_unix_cred (OM_uint32 *, /* minor status */ const gss_name_t pname, /* principal name */ const gss_OID mech, /* mechanism to query */ uid_t *uidp, /* pointer to UID for result */ gid_t *gidp, /* pointer to GID for result */ int *numgroups, /* number of groups */ gid_t *groups /* pointer to group list */ ); OM_uint32 gss_ip_to_dns (OM_uint32 *, /* minor status */ char *ip_addr, /* IP host address string */ char *dns_name /* pointer to dns_name for result */ ); /* * Mbuf oriented message signing and encryption. * * Get_mic allocates an mbuf to hold the message checksum. Verify_mic * may modify the passed-in mic but will not free it. * * Wrap and unwrap * consume the message and generate a new mbuf chain with the * result. The original message is freed on error. 
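 * For example, wrapping a message with confidentiality (an
 * illustrative sketch; m is the chain to protect):
 *
 *	struct mbuf *m;
 *	int conf_state;
 *	OM_uint32 maj, min;
 *
 *	maj = gss_wrap_mbuf(&min, ctx, 1, GSS_C_QOP_DEFAULT, &m,
 *	    &conf_state);
 *
 * On success m points at the new, wrapped chain; on error the
 * original chain has already been freed and must not be touched again.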
*/ struct mbuf; OM_uint32 gss_get_mic_mbuf (OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ gss_qop_t, /* qop_req */ struct mbuf *, /* message_buffer */ struct mbuf ** /* message_token */ ); OM_uint32 gss_verify_mic_mbuf (OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ struct mbuf *, /* message_buffer */ struct mbuf *, /* token_buffer */ gss_qop_t * /* qop_state */ ); OM_uint32 gss_wrap_mbuf (OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ int, /* conf_req_flag */ gss_qop_t, /* qop_req */ struct mbuf **, /* message_buffer */ int * /* conf_state */ ); OM_uint32 gss_unwrap_mbuf (OM_uint32 *, /* minor_status */ const gss_ctx_id_t, /* context_handle */ struct mbuf **, /* message_buffer */ int *, /* conf_state */ gss_qop_t * /* qop_state */ ); __END_DECLS #endif /* _KGSSAPI_GSSAPI_H_ */ diff --git a/sys/kgssapi/gssapi_impl.h b/sys/kgssapi/gssapi_impl.h index 3279dc8da122..d8a85f20a602 100644 --- a/sys/kgssapi/gssapi_impl.h +++ b/sys/kgssapi/gssapi_impl.h @@ -1,82 +1,82 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "gssd.h" MALLOC_DECLARE(M_GSSAPI); struct _gss_ctx_id_t { KOBJ_FIELDS; gssd_ctx_id_t handle; }; struct _gss_cred_id_t { gssd_cred_id_t handle; }; struct _gss_name_t { gssd_name_t handle; }; struct kgss_mech { LIST_ENTRY(kgss_mech) km_link; gss_OID km_mech_type; const char *km_mech_name; struct kobj_class *km_class; }; LIST_HEAD(kgss_mech_list, kgss_mech); /* Macros for VIMAGE. */ /* Just define the KGSS_VNETxxx() macros as VNETxxx() macros. 
*/ #define KGSS_VNET_DEFINE(t, n) VNET_DEFINE(t, n) #define KGSS_VNET_DEFINE_STATIC(t, n) VNET_DEFINE_STATIC(t, n) #define KGSS_VNET_DECLARE(t, n) VNET_DECLARE(t, n) #define KGSS_VNET(n) VNET(n) #define KGSS_CURVNET_SET(n) CURVNET_SET(n) #define KGSS_CURVNET_SET_QUIET(n) CURVNET_SET_QUIET(n) #define KGSS_CURVNET_RESTORE() CURVNET_RESTORE() #define KGSS_TD_TO_VNET(n) TD_TO_VNET(n) extern struct mtx kgss_gssd_lock; extern struct kgss_mech_list kgss_mechs; KGSS_VNET_DECLARE(CLIENT *, kgss_gssd_handle); CLIENT *kgss_gssd_client(void); int kgss_oid_equal(const gss_OID oid1, const gss_OID oid2); extern void kgss_install_mech(gss_OID mech_type, const char *name, struct kobj_class *cls); extern void kgss_uninstall_mech(gss_OID mech_type); extern gss_OID kgss_find_mech_by_name(const char *name); extern const char *kgss_find_mech_by_oid(const gss_OID oid); extern gss_ctx_id_t kgss_create_context(gss_OID mech_type); extern void kgss_delete_context(gss_ctx_id_t ctx, gss_buffer_t output_token); -extern OM_uint32 kgss_transfer_context(gss_ctx_id_t ctx); +extern OM_uint32 kgss_transfer_context(gss_ctx_id_t ctx, void *lctx); extern void kgss_copy_buffer(const gss_buffer_t from, gss_buffer_t to); diff --git a/sys/kgssapi/gssd.x b/sys/kgssapi/gssd.x index b50f39b33554..bf63ba95f8df 100644 --- a/sys/kgssapi/gssd.x +++ b/sys/kgssapi/gssd.x @@ -1,278 +1,355 @@ /*- * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef RPC_HDR %#ifdef _KERNEL %#include %#else %#include %#include %#endif %extern bool_t xdr_gss_buffer_desc(XDR *xdrs, gss_buffer_desc *buf); %extern bool_t xdr_gss_OID_desc(XDR *xdrs, gss_OID_desc *oid); %extern bool_t xdr_gss_OID(XDR *xdrs, gss_OID *oidp); %extern bool_t xdr_gss_OID_set_desc(XDR *xdrs, gss_OID_set_desc *set); %extern bool_t xdr_gss_OID_set(XDR *xdrs, gss_OID_set *setp); %extern bool_t xdr_gss_channel_bindings_t(XDR *xdrs, gss_channel_bindings_t *chp); #endif typedef uint64_t gssd_ctx_id_t; typedef uint64_t gssd_cred_id_t; typedef uint64_t gssd_name_t; +struct kgss_lucid_desc { + uint32_t initiate; + uint32_t endtime; + uint64_t send_seq; + uint64_t recv_seq; + uint32_t protocol; + uint32_t rfc_sign; + uint32_t rfc_seal; + uint32_t have_subkey; + uint32_t ctx_type; + gss_buffer_desc ctx_key; + uint32_t subkey_type; + gss_buffer_desc subkey_key; +}; + struct init_sec_context_res { uint32_t major_status; uint32_t minor_status; gssd_ctx_id_t ctx; gss_OID actual_mech_type; gss_buffer_desc output_token; uint32_t ret_flags; uint32_t time_rec; }; struct init_sec_context_args { uint32_t uid; gssd_cred_id_t cred; gssd_ctx_id_t ctx; gssd_name_t name; gss_OID mech_type; uint32_t req_flags; uint32_t time_req; gss_channel_bindings_t input_chan_bindings; gss_buffer_desc input_token; }; +struct init_sec_context_lucid_v1_res { + uint32_t major_status; + uint32_t minor_status; + gssd_ctx_id_t ctx; + gss_OID actual_mech_type; + gss_buffer_desc output_token; + uint32_t ret_flags; + uint32_t time_rec; + kgss_lucid_desc lucid; +}; + +struct init_sec_context_lucid_v1_args { + uint32_t uid; + gssd_cred_id_t cred; + gssd_ctx_id_t ctx; + gssd_name_t name; + gss_OID mech_type; + uint32_t req_flags; + uint32_t time_req; + gss_channel_bindings_t input_chan_bindings; + gss_buffer_desc input_token; +}; + struct accept_sec_context_res { uint32_t major_status; uint32_t minor_status; gssd_ctx_id_t ctx; gssd_name_t src_name; gss_OID mech_type; gss_buffer_desc output_token; uint32_t ret_flags; uint32_t time_rec; gssd_cred_id_t delegated_cred_handle; }; struct accept_sec_context_args { gssd_ctx_id_t ctx; gssd_cred_id_t cred; gss_buffer_desc input_token; gss_channel_bindings_t input_chan_bindings; }; +struct accept_sec_context_lucid_v1_res { + uint32_t major_status; + uint32_t minor_status; + gssd_ctx_id_t ctx; + gssd_name_t src_name; + gss_OID mech_type; + gss_buffer_desc output_token; + uint32_t ret_flags; + uint32_t time_rec; + gssd_cred_id_t delegated_cred_handle; + kgss_lucid_desc lucid; + gss_buffer_desc exported_name; + uint32_t uid; + uint32_t gid; + uint32_t gidlist<>; +}; + +struct accept_sec_context_lucid_v1_args { + gssd_ctx_id_t ctx; + gssd_cred_id_t cred; + gss_buffer_desc input_token; + gss_channel_bindings_t input_chan_bindings; +}; + struct delete_sec_context_res { uint32_t major_status; uint32_t minor_status; gss_buffer_desc output_token; }; struct delete_sec_context_args { gssd_ctx_id_t ctx; }; enum sec_context_format { KGSS_HEIMDAL_0_6, - KGSS_HEIMDAL_1_1 + KGSS_HEIMDAL_1_1, + MIT_V1 }; struct export_sec_context_res { uint32_t major_status; uint32_t minor_status; enum sec_context_format format; gss_buffer_desc interprocess_token; }; struct export_sec_context_args { gssd_ctx_id_t ctx; }; struct import_name_res { uint32_t major_status; uint32_t minor_status; gssd_name_t output_name; }; struct import_name_args { gss_buffer_desc input_name_buffer; gss_OID input_name_type; }; struct canonicalize_name_res { uint32_t major_status; uint32_t minor_status; gssd_name_t output_name; }; struct 
canonicalize_name_args { gssd_name_t input_name; gss_OID mech_type; }; struct export_name_res { uint32_t major_status; uint32_t minor_status; gss_buffer_desc exported_name; }; struct export_name_args { gssd_name_t input_name; }; struct release_name_res { uint32_t major_status; uint32_t minor_status; }; struct release_name_args { gssd_name_t input_name; }; struct pname_to_uid_res { uint32_t major_status; uint32_t minor_status; uint32_t uid; uint32_t gid; uint32_t gidlist<>; }; struct pname_to_uid_args { gssd_name_t pname; gss_OID mech; }; struct acquire_cred_res { uint32_t major_status; uint32_t minor_status; gssd_cred_id_t output_cred; gss_OID_set actual_mechs; uint32_t time_rec; }; struct acquire_cred_args { uint32_t uid; gssd_name_t desired_name; uint32_t time_req; gss_OID_set desired_mechs; int cred_usage; }; struct set_cred_option_res { uint32_t major_status; uint32_t minor_status; }; struct set_cred_option_args { gssd_cred_id_t cred; gss_OID option_name; gss_buffer_desc option_value; }; struct release_cred_res { uint32_t major_status; uint32_t minor_status; }; struct release_cred_args { gssd_cred_id_t cred; }; struct display_status_res { uint32_t major_status; uint32_t minor_status; uint32_t message_context; gss_buffer_desc status_string; }; struct display_status_args { uint32_t status_value; int status_type; gss_OID mech_type; uint32_t message_context; }; struct ip_to_dns_res { uint32_t major_status; uint32_t minor_status; char dns_name; }; struct ip_to_dns_args { char ip_addr; }; +struct supports_lucid_res { + uint32_t major_status; + uint32_t vers; +}; + program GSSD { version GSSDVERS { void GSSD_NULL(void) = 0; init_sec_context_res GSSD_INIT_SEC_CONTEXT(init_sec_context_args) = 1; accept_sec_context_res GSSD_ACCEPT_SEC_CONTEXT(accept_sec_context_args) = 2; delete_sec_context_res GSSD_DELETE_SEC_CONTEXT(delete_sec_context_args) = 3; export_sec_context_res GSSD_EXPORT_SEC_CONTEXT(export_sec_context_args) = 4; import_name_res GSSD_IMPORT_NAME(import_name_args) = 5; canonicalize_name_res GSSD_CANONICALIZE_NAME(canonicalize_name_args) = 6; export_name_res GSSD_EXPORT_NAME(export_name_args) = 7; release_name_res GSSD_RELEASE_NAME(release_name_args) = 8; pname_to_uid_res GSSD_PNAME_TO_UID(pname_to_uid_args) = 9; acquire_cred_res GSSD_ACQUIRE_CRED(acquire_cred_args) = 10; set_cred_option_res GSSD_SET_CRED_OPTION(set_cred_option_args) = 11; release_cred_res GSSD_RELEASE_CRED(release_cred_args) = 12; display_status_res GSSD_DISPLAY_STATUS(display_status_args) = 13; ip_to_dns_res GSSD_IP_TO_DNS(ip_to_dns_args) = 14; + + init_sec_context_lucid_v1_res + GSSD_INIT_SEC_CONTEXT_LUCID_V1(init_sec_context_lucid_v1_args) = 15; + + accept_sec_context_lucid_v1_res + GSSD_ACCEPT_SEC_CONTEXT_LUCID_V1(accept_sec_context_lucid_v1_args) = 16; + + supports_lucid_res + GSSD_SUPPORTS_LUCID(void) = 17; } = 1; } = 0x40677373; diff --git a/sys/kgssapi/krb5/krb5_mech.c b/sys/kgssapi/krb5/krb5_mech.c index 0b8fbc90fcd1..59d5b120e4fb 100644 --- a/sys/kgssapi/krb5/krb5_mech.c +++ b/sys/kgssapi/krb5/krb5_mech.c @@ -1,2124 +1,2204 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include "kgss_if.h" #include "kcrypto.h" #define GSS_TOKEN_SENT_BY_ACCEPTOR 1 #define GSS_TOKEN_SEALED 2 #define GSS_TOKEN_ACCEPTOR_SUBKEY 4 static gss_OID_desc krb5_mech_oid = {9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" }; struct krb5_data { size_t kd_length; void *kd_data; }; struct krb5_keyblock { uint16_t kk_type; /* encryption type */ struct krb5_data kk_key; /* key data */ }; struct krb5_address { uint16_t ka_type; struct krb5_data ka_addr; }; /* * The km_elem array is ordered so that the highest received sequence * number is listed first. */ struct krb5_msg_order { uint32_t km_flags; uint32_t km_start; uint32_t km_length; uint32_t km_jitter_window; uint32_t km_first_seq; uint32_t *km_elem; }; struct krb5_context { struct _gss_ctx_id_t kc_common; struct mtx kc_lock; uint32_t kc_ac_flags; uint32_t kc_ctx_flags; uint32_t kc_more_flags; #define LOCAL 1 #define OPEN 2 #define COMPAT_OLD_DES3 4 #define COMPAT_OLD_DES3_SELECTED 8 #define ACCEPTOR_SUBKEY 16 struct krb5_address kc_local_address; struct krb5_address kc_remote_address; uint16_t kc_local_port; uint16_t kc_remote_port; struct krb5_keyblock kc_keyblock; struct krb5_keyblock kc_local_subkey; struct krb5_keyblock kc_remote_subkey; volatile uint32_t kc_local_seqnumber; uint32_t kc_remote_seqnumber; uint32_t kc_keytype; uint32_t kc_cksumtype; struct krb5_data kc_source_name; struct krb5_data kc_target_name; uint32_t kc_lifetime; struct krb5_msg_order kc_msg_order; struct krb5_key_state *kc_tokenkey; struct krb5_key_state *kc_encryptkey; struct krb5_key_state *kc_checksumkey; struct krb5_key_state *kc_send_seal_Ke; struct krb5_key_state *kc_send_seal_Ki; struct krb5_key_state *kc_send_seal_Kc; struct krb5_key_state *kc_send_sign_Kc; struct krb5_key_state *kc_recv_seal_Ke; struct krb5_key_state *kc_recv_seal_Ki; struct krb5_key_state *kc_recv_seal_Kc; struct krb5_key_state *kc_recv_sign_Kc; }; static uint16_t get_uint16(const uint8_t **pp, size_t *lenp) { const uint8_t *p = *pp; uint16_t v; if (*lenp < 2) return (0); v = (p[0] << 8) | p[1]; *pp = p + 2; *lenp = *lenp - 2; return (v); } static uint32_t get_uint32(const uint8_t **pp, size_t *lenp) { const uint8_t *p = *pp; uint32_t v; if (*lenp < 4) return (0); v = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; *pp = p + 4; *lenp = *lenp - 4; return (v); } static void get_data(const uint8_t **pp, size_t *lenp, struct krb5_data *dp) { size_t sz = get_uint32(pp, lenp); dp->kd_length = sz; dp->kd_data = 
malloc(sz, M_GSSAPI, M_WAITOK); if (*lenp < sz) sz = *lenp; bcopy(*pp, dp->kd_data, sz); (*pp) += sz; (*lenp) -= sz; } static void delete_data(struct krb5_data *dp) { if (dp->kd_data) { free(dp->kd_data, M_GSSAPI); dp->kd_length = 0; dp->kd_data = NULL; } } static void get_address(const uint8_t **pp, size_t *lenp, struct krb5_address *ka) { ka->ka_type = get_uint16(pp, lenp); get_data(pp, lenp, &ka->ka_addr); } static void delete_address(struct krb5_address *ka) { delete_data(&ka->ka_addr); } static void get_keyblock(const uint8_t **pp, size_t *lenp, struct krb5_keyblock *kk) { kk->kk_type = get_uint16(pp, lenp); get_data(pp, lenp, &kk->kk_key); } static void delete_keyblock(struct krb5_keyblock *kk) { if (kk->kk_key.kd_data) bzero(kk->kk_key.kd_data, kk->kk_key.kd_length); delete_data(&kk->kk_key); } static void copy_key(struct krb5_keyblock *from, struct krb5_keyblock **to) { if (from->kk_key.kd_length) *to = from; else *to = NULL; } +static void +copy_lucid_key(gss_buffer_desc *from, uint32_t type, struct krb5_keyblock *to) +{ + + to->kk_type = type; + to->kk_key.kd_length = from->length; + if (from->length > 0) { + to->kk_key.kd_data = malloc(from->length, M_GSSAPI, M_WAITOK); + memcpy(to->kk_key.kd_data, from->value, from->length); + } +} + /* * Return non-zero if we are initiator. */ static __inline int is_initiator(struct krb5_context *kc) { return (kc->kc_more_flags & LOCAL); } /* * Return non-zero if we are acceptor. */ static __inline int is_acceptor(struct krb5_context *kc) { return !(kc->kc_more_flags & LOCAL); } static void get_initiator_subkey(struct krb5_context *kc, struct krb5_keyblock **kdp) { if (is_initiator(kc)) copy_key(&kc->kc_local_subkey, kdp); else copy_key(&kc->kc_remote_subkey, kdp); if (!*kdp) copy_key(&kc->kc_keyblock, kdp); } static void get_acceptor_subkey(struct krb5_context *kc, struct krb5_keyblock **kdp) { if (is_initiator(kc)) copy_key(&kc->kc_remote_subkey, kdp); else copy_key(&kc->kc_local_subkey, kdp); } static OM_uint32 get_keys(struct krb5_context *kc) { struct krb5_keyblock *keydata; struct krb5_encryption_class *ec; struct krb5_key_state *key; int etype; keydata = NULL; get_acceptor_subkey(kc, &keydata); if (!keydata) if ((kc->kc_more_flags & ACCEPTOR_SUBKEY) == 0) get_initiator_subkey(kc, &keydata); if (!keydata) return (GSS_S_FAILURE); /* * GSS-API treats all DES etypes the same and all DES3 etypes * the same. */ switch (keydata->kk_type) { case ETYPE_DES_CBC_CRC: case ETYPE_DES_CBC_MD4: case ETYPE_DES_CBC_MD5: etype = ETYPE_DES_CBC_CRC; break; case ETYPE_DES3_CBC_MD5: case ETYPE_DES3_CBC_SHA1: case ETYPE_OLD_DES3_CBC_SHA1: etype = ETYPE_DES3_CBC_SHA1; break; default: etype = keydata->kk_type; } ec = krb5_find_encryption_class(etype); if (!ec) return (GSS_S_FAILURE); key = krb5_create_key(ec); krb5_set_key(key, keydata->kk_key.kd_data); kc->kc_tokenkey = key; switch (etype) { case ETYPE_DES_CBC_CRC: case ETYPE_ARCFOUR_HMAC_MD5: case ETYPE_ARCFOUR_HMAC_MD5_56: { /* * Single DES and ARCFOUR uses a 'derived' key (XOR * with 0xf0) for encrypting wrap tokens. The original * key is used for checksums and sequence numbers. */ struct krb5_key_state *ekey; uint8_t *ekp, *kp; int i; ekey = krb5_create_key(ec); ekp = ekey->ks_key; kp = key->ks_key; for (i = 0; i < ec->ec_keylen; i++) ekp[i] = kp[i] ^ 0xf0; krb5_set_key(ekey, ekp); kc->kc_encryptkey = ekey; refcount_acquire(&key->ks_refs); kc->kc_checksumkey = key; break; } case ETYPE_DES3_CBC_SHA1: /* * Triple DES uses a RFC 3961 style derived key with * usage number KG_USAGE_SIGN for checksums. 
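As the comment above notes, single DES and ARCFOUR encrypt wrap tokens with a derived key: the base context key with every byte XORed with 0xf0, while the original key keeps doing checksums and sequence numbers. A minimal standalone sketch of that derivation (function and buffer names here are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Derived wrap-encryption key: each byte of the base key XORed with 0xf0. */
static void
derive_seal_key_f0(const uint8_t *base_key, uint8_t *seal_key, size_t keylen)
{
	size_t i;

	for (i = 0; i < keylen; i++)
		seal_key[i] = base_key[i] ^ 0xf0;
}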
The * original key is used for encryption and sequence * numbers. */ kc->kc_checksumkey = krb5_get_checksum_key(key, KG_USAGE_SIGN); refcount_acquire(&key->ks_refs); kc->kc_encryptkey = key; break; default: /* * We need eight derived keys four for sending and * four for receiving. */ if (is_initiator(kc)) { /* * We are initiator. */ kc->kc_send_seal_Ke = krb5_get_encryption_key(key, KG_USAGE_INITIATOR_SEAL); kc->kc_send_seal_Ki = krb5_get_integrity_key(key, KG_USAGE_INITIATOR_SEAL); kc->kc_send_seal_Kc = krb5_get_checksum_key(key, KG_USAGE_INITIATOR_SEAL); kc->kc_send_sign_Kc = krb5_get_checksum_key(key, KG_USAGE_INITIATOR_SIGN); kc->kc_recv_seal_Ke = krb5_get_encryption_key(key, KG_USAGE_ACCEPTOR_SEAL); kc->kc_recv_seal_Ki = krb5_get_integrity_key(key, KG_USAGE_ACCEPTOR_SEAL); kc->kc_recv_seal_Kc = krb5_get_checksum_key(key, KG_USAGE_ACCEPTOR_SEAL); kc->kc_recv_sign_Kc = krb5_get_checksum_key(key, KG_USAGE_ACCEPTOR_SIGN); } else { /* * We are acceptor. */ kc->kc_send_seal_Ke = krb5_get_encryption_key(key, KG_USAGE_ACCEPTOR_SEAL); kc->kc_send_seal_Ki = krb5_get_integrity_key(key, KG_USAGE_ACCEPTOR_SEAL); kc->kc_send_seal_Kc = krb5_get_checksum_key(key, KG_USAGE_ACCEPTOR_SEAL); kc->kc_send_sign_Kc = krb5_get_checksum_key(key, KG_USAGE_ACCEPTOR_SIGN); kc->kc_recv_seal_Ke = krb5_get_encryption_key(key, KG_USAGE_INITIATOR_SEAL); kc->kc_recv_seal_Ki = krb5_get_integrity_key(key, KG_USAGE_INITIATOR_SEAL); kc->kc_recv_seal_Kc = krb5_get_checksum_key(key, KG_USAGE_INITIATOR_SEAL); kc->kc_recv_sign_Kc = krb5_get_checksum_key(key, KG_USAGE_INITIATOR_SIGN); } break; } return (GSS_S_COMPLETE); } static void krb5_init(gss_ctx_id_t ctx) { struct krb5_context *kc = (struct krb5_context *)ctx; mtx_init(&kc->kc_lock, "krb5 gss lock", NULL, MTX_DEF); } +static OM_uint32 +krb5_lucid_import(gss_ctx_id_t ctx, + enum sec_context_format format, + const gss_buffer_t context_token) +{ + struct krb5_context *kc = (struct krb5_context *)ctx; + kgss_lucid_desc *lctx = (kgss_lucid_desc *)context_token; + OM_uint32 res; + + kc->kc_more_flags = 0; + if (lctx->protocol == 0) { + kc->kc_cksumtype = lctx->rfc_sign; + kc->kc_keytype = lctx->rfc_seal; + copy_lucid_key(&lctx->ctx_key, lctx->ctx_type, + &kc->kc_keyblock); + } else if (lctx->protocol == 1) { + if (lctx->have_subkey != 0) { + if (lctx->initiate != 0) + copy_lucid_key(&lctx->subkey_key, + lctx->subkey_type, + &kc->kc_remote_subkey); + else + copy_lucid_key(&lctx->subkey_key, + lctx->subkey_type, + &kc->kc_local_subkey); + kc->kc_cksumtype = lctx->subkey_type; + kc->kc_keytype = lctx->subkey_type; + kc->kc_more_flags |= ACCEPTOR_SUBKEY; + } else { + if (lctx->initiate != 0) + copy_lucid_key(&lctx->ctx_key, + lctx->ctx_type, + &kc->kc_remote_subkey); + else + copy_lucid_key(&lctx->ctx_key, + lctx->ctx_type, + &kc->kc_local_subkey); + kc->kc_cksumtype = lctx->ctx_type; + kc->kc_keytype = lctx->ctx_type; + } + } else { + return (GSS_S_DEFECTIVE_TOKEN); + } + kc->kc_local_seqnumber = lctx->send_seq; + kc->kc_remote_seqnumber = lctx->recv_seq; + if (lctx->initiate != 0) + kc->kc_more_flags |= LOCAL; + kc->kc_lifetime = lctx->endtime; + kc->kc_msg_order.km_flags = 0; + + res = get_keys(kc); + if (GSS_ERROR(res)) + return (res); + + /* + * We don't need these anymore. 
+ */ + delete_keyblock(&kc->kc_keyblock); + delete_keyblock(&kc->kc_local_subkey); + delete_keyblock(&kc->kc_remote_subkey); + + return (GSS_S_COMPLETE); +} + static OM_uint32 krb5_import(gss_ctx_id_t ctx, enum sec_context_format format, const gss_buffer_t context_token) { struct krb5_context *kc = (struct krb5_context *)ctx; OM_uint32 res; const uint8_t *p = (const uint8_t *) context_token->value; size_t len = context_token->length; uint32_t flags; int i; + /* For MIT, just call krb5_lucid_import(). */ + if (format == MIT_V1) + return (krb5_lucid_import(ctx, format, context_token)); + /* * We support heimdal 0.6 and heimdal 1.1 */ if (format != KGSS_HEIMDAL_0_6 && format != KGSS_HEIMDAL_1_1) return (GSS_S_DEFECTIVE_TOKEN); #define SC_LOCAL_ADDRESS 1 #define SC_REMOTE_ADDRESS 2 #define SC_KEYBLOCK 4 #define SC_LOCAL_SUBKEY 8 #define SC_REMOTE_SUBKEY 16 /* * Ensure that the token starts with krb5 oid. */ if (p[0] != 0x00 || p[1] != krb5_mech_oid.length || len < krb5_mech_oid.length + 2 || bcmp(krb5_mech_oid.elements, p + 2, krb5_mech_oid.length)) return (GSS_S_DEFECTIVE_TOKEN); p += krb5_mech_oid.length + 2; len -= krb5_mech_oid.length + 2; flags = get_uint32(&p, &len); kc->kc_ac_flags = get_uint32(&p, &len); if (flags & SC_LOCAL_ADDRESS) get_address(&p, &len, &kc->kc_local_address); if (flags & SC_REMOTE_ADDRESS) get_address(&p, &len, &kc->kc_remote_address); kc->kc_local_port = get_uint16(&p, &len); kc->kc_remote_port = get_uint16(&p, &len); if (flags & SC_KEYBLOCK) get_keyblock(&p, &len, &kc->kc_keyblock); if (flags & SC_LOCAL_SUBKEY) get_keyblock(&p, &len, &kc->kc_local_subkey); if (flags & SC_REMOTE_SUBKEY) get_keyblock(&p, &len, &kc->kc_remote_subkey); kc->kc_local_seqnumber = get_uint32(&p, &len); kc->kc_remote_seqnumber = get_uint32(&p, &len); kc->kc_keytype = get_uint32(&p, &len); kc->kc_cksumtype = get_uint32(&p, &len); get_data(&p, &len, &kc->kc_source_name); get_data(&p, &len, &kc->kc_target_name); kc->kc_ctx_flags = get_uint32(&p, &len); kc->kc_more_flags = get_uint32(&p, &len); kc->kc_lifetime = get_uint32(&p, &len); /* * Heimdal 1.1 adds the message order stuff. */ if (format == KGSS_HEIMDAL_1_1) { kc->kc_msg_order.km_flags = get_uint32(&p, &len); kc->kc_msg_order.km_start = get_uint32(&p, &len); kc->kc_msg_order.km_length = get_uint32(&p, &len); kc->kc_msg_order.km_jitter_window = get_uint32(&p, &len); kc->kc_msg_order.km_first_seq = get_uint32(&p, &len); kc->kc_msg_order.km_elem = malloc(kc->kc_msg_order.km_jitter_window * sizeof(uint32_t), M_GSSAPI, M_WAITOK); for (i = 0; i < kc->kc_msg_order.km_jitter_window; i++) kc->kc_msg_order.km_elem[i] = get_uint32(&p, &len); } else { kc->kc_msg_order.km_flags = 0; } res = get_keys(kc); if (GSS_ERROR(res)) return (res); /* * We don't need these anymore. 
*/ delete_keyblock(&kc->kc_keyblock); delete_keyblock(&kc->kc_local_subkey); delete_keyblock(&kc->kc_remote_subkey); return (GSS_S_COMPLETE); } static void krb5_delete(gss_ctx_id_t ctx, gss_buffer_t output_token) { struct krb5_context *kc = (struct krb5_context *)ctx; delete_address(&kc->kc_local_address); delete_address(&kc->kc_remote_address); delete_keyblock(&kc->kc_keyblock); delete_keyblock(&kc->kc_local_subkey); delete_keyblock(&kc->kc_remote_subkey); delete_data(&kc->kc_source_name); delete_data(&kc->kc_target_name); if (kc->kc_msg_order.km_elem) free(kc->kc_msg_order.km_elem, M_GSSAPI); if (output_token) { output_token->length = 0; output_token->value = NULL; } if (kc->kc_tokenkey) { krb5_free_key(kc->kc_tokenkey); if (kc->kc_encryptkey) { krb5_free_key(kc->kc_encryptkey); krb5_free_key(kc->kc_checksumkey); } else { krb5_free_key(kc->kc_send_seal_Ke); krb5_free_key(kc->kc_send_seal_Ki); krb5_free_key(kc->kc_send_seal_Kc); krb5_free_key(kc->kc_send_sign_Kc); krb5_free_key(kc->kc_recv_seal_Ke); krb5_free_key(kc->kc_recv_seal_Ki); krb5_free_key(kc->kc_recv_seal_Kc); krb5_free_key(kc->kc_recv_sign_Kc); } } mtx_destroy(&kc->kc_lock); } static gss_OID krb5_mech_type(gss_ctx_id_t ctx) { return (&krb5_mech_oid); } /* * Make a token with the given type and length (the length includes * the TOK_ID), initialising the token header appropriately. Return a * pointer to the TOK_ID of the token. A new mbuf is allocated with * the framing header plus hlen bytes of space. * * Format is as follows: * * 0x60 [APPLICATION 0] SEQUENCE * DER encoded length length of oid + type + inner token length * 0x06 NN OID of mechanism type * TT TT TOK_ID * data for inner token * * 1: der encoded length */ static void * krb5_make_token(char tok_id[2], size_t hlen, size_t len, struct mbuf **mp) { size_t inside_len, len_len, tlen; gss_OID oid = &krb5_mech_oid; struct mbuf *m; uint8_t *p; inside_len = 2 + oid->length + len; if (inside_len < 128) len_len = 1; else if (inside_len < 0x100) len_len = 2; else if (inside_len < 0x10000) len_len = 3; else if (inside_len < 0x1000000) len_len = 4; else len_len = 5; tlen = 1 + len_len + 2 + oid->length + hlen; KASSERT(tlen <= MLEN, ("token head too large")); MGET(m, M_WAITOK, MT_DATA); M_ALIGN(m, tlen); m->m_len = tlen; p = (uint8_t *) m->m_data; *p++ = 0x60; switch (len_len) { case 1: *p++ = inside_len; break; case 2: *p++ = 0x81; *p++ = inside_len; break; case 3: *p++ = 0x82; *p++ = inside_len >> 8; *p++ = inside_len; break; case 4: *p++ = 0x83; *p++ = inside_len >> 16; *p++ = inside_len >> 8; *p++ = inside_len; break; case 5: *p++ = 0x84; *p++ = inside_len >> 24; *p++ = inside_len >> 16; *p++ = inside_len >> 8; *p++ = inside_len; break; } *p++ = 0x06; *p++ = oid->length; bcopy(oid->elements, p, oid->length); p += oid->length; p[0] = tok_id[0]; p[1] = tok_id[1]; *mp = m; return (p); } /* * Verify a token, checking the inner token length and mechanism oid. * pointer to the first byte of the TOK_ID. The length of the * encapsulated data is checked to be at least len bytes; the actual * length of the encapsulated data (including TOK_ID) is returned in * *encap_len. * * If can_pullup is TRUE and the token header is fragmented, we will * rearrange it. 
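Both krb5_make_token() and krb5_verify_token() deal with the mechanism-independent token framing: an 0x60 [APPLICATION 0] tag, a DER definite length, the krb5 mechanism OID, and the two-byte TOK_ID. A small sketch of just the DER length encoding they emit and parse (illustrative name; larger lengths continue the pattern with an 0x84 prefix):

#include <stddef.h>
#include <stdint.h>

/* Write a DER definite length at p; returns the number of bytes emitted. */
static size_t
der_put_length(uint8_t *p, size_t len)
{
	if (len < 128) {			/* short form: one byte */
		p[0] = (uint8_t)len;
		return (1);
	} else if (len < 0x100) {		/* 0x81 + one length byte */
		p[0] = 0x81;
		p[1] = (uint8_t)len;
		return (2);
	} else if (len < 0x10000) {		/* 0x82 + two length bytes */
		p[0] = 0x82;
		p[1] = (uint8_t)(len >> 8);
		p[2] = (uint8_t)len;
		return (3);
	}
	p[0] = 0x83;				/* 0x83 + three bytes (len < 0x1000000 assumed) */
	p[1] = (uint8_t)(len >> 16);
	p[2] = (uint8_t)(len >> 8);
	p[3] = (uint8_t)len;
	return (4);
}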
* * Format is as follows: * * 0x60 [APPLICATION 0] SEQUENCE * DER encoded length length of oid + type + inner token length * 0x06 NN OID of mechanism type * TT TT TOK_ID * data for inner token * * 1: der encoded length */ static void * krb5_verify_token(char tok_id[2], size_t len, struct mbuf **mp, size_t *encap_len, bool_t can_pullup) { struct mbuf *m; size_t tlen, hlen, len_len, inside_len; gss_OID oid = &krb5_mech_oid; uint8_t *p; m = *mp; tlen = m_length(m, NULL); if (tlen < 2) return (NULL); /* * Ensure that at least the framing part of the token is * contigous. */ if (m->m_len < 2) { if (can_pullup) *mp = m = m_pullup(m, 2); else return (NULL); } p = m->m_data; if (*p++ != 0x60) return (NULL); if (*p < 0x80) { inside_len = *p++; len_len = 1; } else { /* * Ensure there is enough space for the DER encoded length. */ len_len = (*p & 0x7f) + 1; if (tlen < len_len + 1) return (NULL); if (m->m_len < len_len + 1) { if (can_pullup) *mp = m = m_pullup(m, len_len + 1); else return (NULL); p = m->m_data + 1; } switch (*p++) { case 0x81: inside_len = *p++; break; case 0x82: inside_len = (p[0] << 8) | p[1]; p += 2; break; case 0x83: inside_len = (p[0] << 16) | (p[1] << 8) | p[2]; p += 3; break; case 0x84: inside_len = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; p += 4; break; default: return (NULL); } } if (tlen != inside_len + len_len + 1) return (NULL); if (inside_len < 2 + oid->length + len) return (NULL); /* * Now that we know the value of len_len, we can pullup the * whole header. The header is 1 + len_len + 2 + oid->length + * len bytes. */ hlen = 1 + len_len + 2 + oid->length + len; if (m->m_len < hlen) { if (can_pullup) *mp = m = m_pullup(m, hlen); else return (NULL); p = m->m_data + 1 + len_len; } if (*p++ != 0x06) return (NULL); if (*p++ != oid->length) return (NULL); if (bcmp(oid->elements, p, oid->length)) return (NULL); p += oid->length; if (p[0] != tok_id[0]) return (NULL); if (p[1] != tok_id[1]) return (NULL); *encap_len = inside_len - 2 - oid->length; return (p); } static void krb5_insert_seq(struct krb5_msg_order *mo, uint32_t seq, int index) { int i; if (mo->km_length < mo->km_jitter_window) mo->km_length++; for (i = mo->km_length - 1; i > index; i--) mo->km_elem[i] = mo->km_elem[i - 1]; mo->km_elem[index] = seq; } /* * Check sequence numbers according to RFC 2743 section 1.2.3. */ static OM_uint32 krb5_sequence_check(struct krb5_context *kc, uint32_t seq) { OM_uint32 res = GSS_S_FAILURE; struct krb5_msg_order *mo = &kc->kc_msg_order; int check_sequence = mo->km_flags & GSS_C_SEQUENCE_FLAG; int check_replay = mo->km_flags & GSS_C_REPLAY_FLAG; int i; mtx_lock(&kc->kc_lock); /* * Message is in-sequence with no gap. */ if (mo->km_length == 0 || seq == mo->km_elem[0] + 1) { /* * This message is received in-sequence with no gaps. */ krb5_insert_seq(mo, seq, 0); res = GSS_S_COMPLETE; goto out; } if (seq > mo->km_elem[0]) { /* * This message is received in-sequence with a gap. 
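krb5_sequence_check() keeps a jitter window of recently seen sequence numbers, highest first, and maps an incoming number onto the RFC 2743 results (complete, gap, duplicate, old, out of sequence). A simplified standalone sketch of that classification, assuming a descending-sorted window and ignoring the replay-vs-sequence flag distinctions and the insertion step:

#include <stddef.h>
#include <stdint.h>

enum seq_result { SEQ_OK, SEQ_GAP, SEQ_DUPLICATE, SEQ_OLD, SEQ_UNSEQ };

/* Classify seq against a descending-sorted window of n entries (n > 0). */
static enum seq_result
classify_seq(uint32_t seq, const uint32_t *window, size_t n)
{
	size_t i;

	if (seq == window[0] + 1)
		return (SEQ_OK);		/* next expected number */
	if (seq > window[0])
		return (SEQ_GAP);		/* in order, but numbers were skipped */
	if (seq < window[n - 1])
		return (SEQ_OLD);		/* fell off the bottom of the window */
	for (i = 0; i < n; i++)
		if (window[i] == seq)
			return (SEQ_DUPLICATE);	/* already seen */
	return (SEQ_UNSEQ);			/* within the window, arrived late */
}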
*/ krb5_insert_seq(mo, seq, 0); if (check_sequence) res = GSS_S_GAP_TOKEN; else res = GSS_S_COMPLETE; goto out; } if (seq < mo->km_elem[mo->km_length - 1]) { if (check_replay && !check_sequence) res = GSS_S_OLD_TOKEN; else res = GSS_S_UNSEQ_TOKEN; goto out; } for (i = 0; i < mo->km_length; i++) { if (mo->km_elem[i] == seq) { res = GSS_S_DUPLICATE_TOKEN; goto out; } if (mo->km_elem[i] < seq) { /* * We need to insert this seq here, */ krb5_insert_seq(mo, seq, i); if (check_replay && !check_sequence) res = GSS_S_COMPLETE; else res = GSS_S_UNSEQ_TOKEN; goto out; } } out: mtx_unlock(&kc->kc_lock); return (res); } static uint8_t sgn_alg_des_md5[] = { 0x00, 0x00 }; static uint8_t seal_alg_des[] = { 0x00, 0x00 }; static uint8_t sgn_alg_des3_sha1[] = { 0x04, 0x00 }; static uint8_t seal_alg_des3[] = { 0x02, 0x00 }; static uint8_t seal_alg_rc4[] = { 0x10, 0x00 }; static uint8_t sgn_alg_hmac_md5[] = { 0x11, 0x00 }; /* * Return the size of the inner token given the use of the key's * encryption class. For wrap tokens, the length of the padded * plaintext will be added to this. */ static size_t token_length(struct krb5_key_state *key) { return (16 + key->ks_class->ec_checksumlen); } static OM_uint32 krb5_get_mic_old(struct krb5_context *kc, struct mbuf *m, struct mbuf **micp, uint8_t sgn_alg[2]) { struct mbuf *mlast, *mic, *tm; uint8_t *p, dir; size_t tlen, mlen, cklen; uint32_t seq; char buf[8]; mlen = m_length(m, &mlast); tlen = token_length(kc->kc_tokenkey); p = krb5_make_token("\x01\x01", tlen, tlen, &mic); p += 2; /* TOK_ID */ *p++ = sgn_alg[0]; /* SGN_ALG */ *p++ = sgn_alg[1]; *p++ = 0xff; /* filler */ *p++ = 0xff; *p++ = 0xff; *p++ = 0xff; /* * SGN_CKSUM: * * Calculate the keyed checksum of the token header plus the * message. */ cklen = kc->kc_checksumkey->ks_class->ec_checksumlen; mic->m_len = p - (uint8_t *) mic->m_data; mic->m_next = m; MGET(tm, M_WAITOK, MT_DATA); tm->m_len = cklen; mlast->m_next = tm; krb5_checksum(kc->kc_checksumkey, 15, mic, mic->m_len - 8, 8 + mlen, cklen); bcopy(tm->m_data, p + 8, cklen); mic->m_next = NULL; mlast->m_next = NULL; m_free(tm); /* * SND_SEQ: * * Take the four bytes of the sequence number least * significant first followed by four bytes of direction * marker (zero for initiator and 0xff for acceptor). Encrypt * that data using the SGN_CKSUM as IV. Note: ARC4 wants the * sequence number big-endian. */ seq = atomic_fetchadd_32(&kc->kc_local_seqnumber, 1); if (sgn_alg[0] == 0x11) { p[0] = (seq >> 24); p[1] = (seq >> 16); p[2] = (seq >> 8); p[3] = (seq >> 0); } else { p[0] = (seq >> 0); p[1] = (seq >> 8); p[2] = (seq >> 16); p[3] = (seq >> 24); } if (is_initiator(kc)) { dir = 0; } else { dir = 0xff; } p[4] = dir; p[5] = dir; p[6] = dir; p[7] = dir; bcopy(p + 8, buf, 8); /* * Set the mic buffer to its final size so that the encrypt * can see the SND_SEQ part. 
*/ mic->m_len += 8 + cklen; krb5_encrypt(kc->kc_tokenkey, mic, mic->m_len - cklen - 8, 8, buf, 8); *micp = mic; return (GSS_S_COMPLETE); } static OM_uint32 krb5_get_mic_new(struct krb5_context *kc, struct mbuf *m, struct mbuf **micp) { struct krb5_key_state *key = kc->kc_send_sign_Kc; struct mbuf *mlast, *mic; uint8_t *p; int flags; size_t mlen, cklen; uint32_t seq; mlen = m_length(m, &mlast); cklen = key->ks_class->ec_checksumlen; KASSERT(16 + cklen <= MLEN, ("checksum too large for an mbuf")); MGET(mic, M_WAITOK, MT_DATA); M_ALIGN(mic, 16 + cklen); mic->m_len = 16 + cklen; p = mic->m_data; /* TOK_ID */ p[0] = 0x04; p[1] = 0x04; /* Flags */ flags = 0; if (is_acceptor(kc)) flags |= GSS_TOKEN_SENT_BY_ACCEPTOR; if (kc->kc_more_flags & ACCEPTOR_SUBKEY) flags |= GSS_TOKEN_ACCEPTOR_SUBKEY; p[2] = flags; /* Filler */ p[3] = 0xff; p[4] = 0xff; p[5] = 0xff; p[6] = 0xff; p[7] = 0xff; /* SND_SEQ */ p[8] = 0; p[9] = 0; p[10] = 0; p[11] = 0; seq = atomic_fetchadd_32(&kc->kc_local_seqnumber, 1); p[12] = (seq >> 24); p[13] = (seq >> 16); p[14] = (seq >> 8); p[15] = (seq >> 0); /* * SGN_CKSUM: * * Calculate the keyed checksum of the message plus the first * 16 bytes of the token header. */ mlast->m_next = mic; krb5_checksum(key, 0, m, 0, mlen + 16, cklen); mlast->m_next = NULL; *micp = mic; return (GSS_S_COMPLETE); } static OM_uint32 krb5_get_mic(gss_ctx_id_t ctx, OM_uint32 *minor_status, gss_qop_t qop_req, struct mbuf *m, struct mbuf **micp) { struct krb5_context *kc = (struct krb5_context *)ctx; *minor_status = 0; if (qop_req != GSS_C_QOP_DEFAULT) return (GSS_S_BAD_QOP); if (time_uptime > kc->kc_lifetime) return (GSS_S_CONTEXT_EXPIRED); switch (kc->kc_tokenkey->ks_class->ec_type) { case ETYPE_DES_CBC_CRC: return (krb5_get_mic_old(kc, m, micp, sgn_alg_des_md5)); case ETYPE_DES3_CBC_SHA1: return (krb5_get_mic_old(kc, m, micp, sgn_alg_des3_sha1)); case ETYPE_ARCFOUR_HMAC_MD5: case ETYPE_ARCFOUR_HMAC_MD5_56: return (krb5_get_mic_old(kc, m, micp, sgn_alg_hmac_md5)); default: return (krb5_get_mic_new(kc, m, micp)); } return (GSS_S_FAILURE); } static OM_uint32 krb5_verify_mic_old(struct krb5_context *kc, struct mbuf *m, struct mbuf *mic, uint8_t sgn_alg[2]) { struct mbuf *mlast, *tm; uint8_t *p, *tp, dir; size_t mlen, tlen, elen; size_t cklen; uint32_t seq; mlen = m_length(m, &mlast); tlen = token_length(kc->kc_tokenkey); p = krb5_verify_token("\x01\x01", tlen, &mic, &elen, FALSE); if (!p) return (GSS_S_DEFECTIVE_TOKEN); #if 0 /* * Disable this check - heimdal-1.1 generates DES3 MIC tokens * that are 2 bytes too big. */ if (elen != tlen) return (GSS_S_DEFECTIVE_TOKEN); #endif /* TOK_ID */ p += 2; /* SGN_ALG */ if (p[0] != sgn_alg[0] || p[1] != sgn_alg[1]) return (GSS_S_DEFECTIVE_TOKEN); p += 2; if (p[0] != 0xff || p[1] != 0xff || p[2] != 0xff || p[3] != 0xff) return (GSS_S_DEFECTIVE_TOKEN); p += 4; /* * SGN_CKSUM: * * Calculate the keyed checksum of the token header plus the * message. */ cklen = kc->kc_checksumkey->ks_class->ec_checksumlen; mic->m_len = p - (uint8_t *) mic->m_data; mic->m_next = m; MGET(tm, M_WAITOK, MT_DATA); tm->m_len = cklen; mlast->m_next = tm; krb5_checksum(kc->kc_checksumkey, 15, mic, mic->m_len - 8, 8 + mlen, cklen); mic->m_next = NULL; mlast->m_next = NULL; if (bcmp(tm->m_data, p + 8, cklen)) { m_free(tm); return (GSS_S_BAD_SIG); } /* * SND_SEQ: * * Take the four bytes of the sequence number least * significant first followed by four bytes of direction * marker (zero for initiator and 0xff for acceptor). Encrypt * that data using the SGN_CKSUM as IV. 
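For these pre-RFC 4121 tokens, the 8-byte SND_SEQ plaintext is the sequence number (least-significant byte first, except big-endian for ARCFOUR) followed by four copies of the direction marker, and it is then encrypted with SGN_CKSUM as the IV. A sketch of just the plaintext layout (hypothetical helper name):

#include <stdint.h>

/* Build the 8-byte SND_SEQ plaintext: sequence number plus direction filler. */
static void
build_snd_seq(uint32_t seq, int initiator, int arcfour, uint8_t out[8])
{
	uint8_t dir = initiator ? 0x00 : 0xff;

	if (arcfour) {			/* ARCFOUR wants the number big-endian */
		out[0] = seq >> 24;
		out[1] = seq >> 16;
		out[2] = seq >> 8;
		out[3] = seq;
	} else {			/* DES/DES3: least-significant byte first */
		out[0] = seq;
		out[1] = seq >> 8;
		out[2] = seq >> 16;
		out[3] = seq >> 24;
	}
	out[4] = out[5] = out[6] = out[7] = dir;
}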
Note: ARC4 wants the * sequence number big-endian. */ bcopy(p, tm->m_data, 8); tm->m_len = 8; krb5_decrypt(kc->kc_tokenkey, tm, 0, 8, p + 8, 8); tp = tm->m_data; if (sgn_alg[0] == 0x11) { seq = tp[3] | (tp[2] << 8) | (tp[1] << 16) | (tp[0] << 24); } else { seq = tp[0] | (tp[1] << 8) | (tp[2] << 16) | (tp[3] << 24); } if (is_initiator(kc)) { dir = 0xff; } else { dir = 0; } if (tp[4] != dir || tp[5] != dir || tp[6] != dir || tp[7] != dir) { m_free(tm); return (GSS_S_DEFECTIVE_TOKEN); } m_free(tm); if (kc->kc_msg_order.km_flags & (GSS_C_REPLAY_FLAG | GSS_C_SEQUENCE_FLAG)) { return (krb5_sequence_check(kc, seq)); } return (GSS_S_COMPLETE); } static OM_uint32 krb5_verify_mic_new(struct krb5_context *kc, struct mbuf *m, struct mbuf *mic) { OM_uint32 res; struct krb5_key_state *key = kc->kc_recv_sign_Kc; struct mbuf *mlast; uint8_t *p; int flags; size_t mlen, cklen; char buf[32]; mlen = m_length(m, &mlast); cklen = key->ks_class->ec_checksumlen; KASSERT(mic->m_next == NULL, ("MIC should be contiguous")); if (mic->m_len != 16 + cklen) return (GSS_S_DEFECTIVE_TOKEN); p = mic->m_data; /* TOK_ID */ if (p[0] != 0x04) return (GSS_S_DEFECTIVE_TOKEN); if (p[1] != 0x04) return (GSS_S_DEFECTIVE_TOKEN); /* Flags */ flags = 0; if (is_initiator(kc)) flags |= GSS_TOKEN_SENT_BY_ACCEPTOR; if (kc->kc_more_flags & ACCEPTOR_SUBKEY) flags |= GSS_TOKEN_ACCEPTOR_SUBKEY; if (p[2] != flags) return (GSS_S_DEFECTIVE_TOKEN); /* Filler */ if (p[3] != 0xff) return (GSS_S_DEFECTIVE_TOKEN); if (p[4] != 0xff) return (GSS_S_DEFECTIVE_TOKEN); if (p[5] != 0xff) return (GSS_S_DEFECTIVE_TOKEN); if (p[6] != 0xff) return (GSS_S_DEFECTIVE_TOKEN); if (p[7] != 0xff) return (GSS_S_DEFECTIVE_TOKEN); /* SND_SEQ */ if (kc->kc_msg_order.km_flags & (GSS_C_REPLAY_FLAG | GSS_C_SEQUENCE_FLAG)) { uint32_t seq; if (p[8] || p[9] || p[10] || p[11]) { res = GSS_S_UNSEQ_TOKEN; } else { seq = (p[12] << 24) | (p[13] << 16) | (p[14] << 8) | p[15]; res = krb5_sequence_check(kc, seq); } if (GSS_ERROR(res)) return (res); } else { res = GSS_S_COMPLETE; } /* * SGN_CKSUM: * * Calculate the keyed checksum of the message plus the first * 16 bytes of the token header. */ m_copydata(mic, 16, cklen, buf); mlast->m_next = mic; krb5_checksum(key, 0, m, 0, mlen + 16, cklen); mlast->m_next = NULL; if (bcmp(buf, p + 16, cklen)) { return (GSS_S_BAD_SIG); } return (GSS_S_COMPLETE); } static OM_uint32 krb5_verify_mic(gss_ctx_id_t ctx, OM_uint32 *minor_status, struct mbuf *m, struct mbuf *mic, gss_qop_t *qop_state) { struct krb5_context *kc = (struct krb5_context *)ctx; *minor_status = 0; if (qop_state) *qop_state = GSS_C_QOP_DEFAULT; if (time_uptime > kc->kc_lifetime) return (GSS_S_CONTEXT_EXPIRED); switch (kc->kc_tokenkey->ks_class->ec_type) { case ETYPE_DES_CBC_CRC: return (krb5_verify_mic_old(kc, m, mic, sgn_alg_des_md5)); case ETYPE_ARCFOUR_HMAC_MD5: case ETYPE_ARCFOUR_HMAC_MD5_56: return (krb5_verify_mic_old(kc, m, mic, sgn_alg_hmac_md5)); case ETYPE_DES3_CBC_SHA1: return (krb5_verify_mic_old(kc, m, mic, sgn_alg_des3_sha1)); default: return (krb5_verify_mic_new(kc, m, mic)); } return (GSS_S_FAILURE); } static OM_uint32 krb5_wrap_old(struct krb5_context *kc, int conf_req_flag, struct mbuf **mp, int *conf_state, uint8_t sgn_alg[2], uint8_t seal_alg[2]) { struct mbuf *m, *mlast, *tm, *cm, *pm; size_t mlen, tlen, padlen, datalen; uint8_t *p, dir; size_t cklen; uint8_t buf[8]; uint32_t seq; /* * How many trailing pad bytes do we need? 
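The answer, computed just below: the plaintext gets an 8-byte random confounder in front and 1-8 trailing pad bytes, each byte holding the pad count, so that the result fills whole cipher blocks. A standalone sketch of the pad computation (hypothetical helper name):

#include <stddef.h>
#include <string.h>

/* 1..blocklen pad bytes, each set to the pad count; returns the pad length. */
static size_t
wrap_pad(unsigned char *pad, size_t msglen, size_t blocklen)
{
	size_t padlen = blocklen - (msglen % blocklen);

	memset(pad, (int)padlen, padlen);
	return (padlen);
}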
*/ m = *mp; mlen = m_length(m, &mlast); tlen = kc->kc_tokenkey->ks_class->ec_msgblocklen; padlen = tlen - (mlen % tlen); /* * The data part of the token has eight bytes of random * confounder prepended and followed by up to eight bytes of * padding bytes each of which is set to the number of padding * bytes. */ datalen = mlen + 8 + padlen; tlen = token_length(kc->kc_tokenkey); p = krb5_make_token("\x02\x01", tlen, datalen + tlen, &tm); p += 2; /* TOK_ID */ *p++ = sgn_alg[0]; /* SGN_ALG */ *p++ = sgn_alg[1]; if (conf_req_flag) { *p++ = seal_alg[0]; /* SEAL_ALG */ *p++ = seal_alg[1]; } else { *p++ = 0xff; /* SEAL_ALG = none */ *p++ = 0xff; } *p++ = 0xff; /* filler */ *p++ = 0xff; /* * Copy the padded message data. */ if (M_LEADINGSPACE(m) >= 8) { m->m_data -= 8; m->m_len += 8; } else { MGET(cm, M_WAITOK, MT_DATA); cm->m_len = 8; cm->m_next = m; m = cm; } arc4rand(m->m_data, 8, 0); if (M_TRAILINGSPACE(mlast) >= padlen) { memset(mlast->m_data + mlast->m_len, padlen, padlen); mlast->m_len += padlen; } else { MGET(pm, M_WAITOK, MT_DATA); memset(pm->m_data, padlen, padlen); pm->m_len = padlen; mlast->m_next = pm; mlast = pm; } tm->m_next = m; /* * SGN_CKSUM: * * Calculate the keyed checksum of the token header plus the * padded message. Fiddle with tm->m_len so that we only * checksum the 8 bytes of head that we care about. */ cklen = kc->kc_checksumkey->ks_class->ec_checksumlen; tlen = tm->m_len; tm->m_len = p - (uint8_t *) tm->m_data; MGET(cm, M_WAITOK, MT_DATA); cm->m_len = cklen; mlast->m_next = cm; krb5_checksum(kc->kc_checksumkey, 13, tm, tm->m_len - 8, datalen + 8, cklen); tm->m_len = tlen; mlast->m_next = NULL; bcopy(cm->m_data, p + 8, cklen); m_free(cm); /* * SND_SEQ: * * Take the four bytes of the sequence number least * significant first (most significant first for ARCFOUR) * followed by four bytes of direction marker (zero for * initiator and 0xff for acceptor). Encrypt that data using * the SGN_CKSUM as IV. */ seq = atomic_fetchadd_32(&kc->kc_local_seqnumber, 1); if (sgn_alg[0] == 0x11) { p[0] = (seq >> 24); p[1] = (seq >> 16); p[2] = (seq >> 8); p[3] = (seq >> 0); } else { p[0] = (seq >> 0); p[1] = (seq >> 8); p[2] = (seq >> 16); p[3] = (seq >> 24); } if (is_initiator(kc)) { dir = 0; } else { dir = 0xff; } p[4] = dir; p[5] = dir; p[6] = dir; p[7] = dir; krb5_encrypt(kc->kc_tokenkey, tm, p - (uint8_t *) tm->m_data, 8, p + 8, 8); if (conf_req_flag) { /* * Encrypt the padded message with an IV of zero for * DES and DES3, or an IV of the sequence number in * big-endian format for ARCFOUR. 
*/ if (seal_alg[0] == 0x10) { buf[0] = (seq >> 24); buf[1] = (seq >> 16); buf[2] = (seq >> 8); buf[3] = (seq >> 0); krb5_encrypt(kc->kc_encryptkey, m, 0, datalen, buf, 4); } else { krb5_encrypt(kc->kc_encryptkey, m, 0, datalen, NULL, 0); } } if (conf_state) *conf_state = conf_req_flag; *mp = tm; return (GSS_S_COMPLETE); } static OM_uint32 krb5_wrap_new(struct krb5_context *kc, int conf_req_flag, struct mbuf **mp, int *conf_state) { struct krb5_key_state *Ke = kc->kc_send_seal_Ke; struct krb5_key_state *Ki = kc->kc_send_seal_Ki; struct krb5_key_state *Kc = kc->kc_send_seal_Kc; const struct krb5_encryption_class *ec = Ke->ks_class; struct mbuf *m, *mlast, *tm; uint8_t *p; int flags, EC; size_t mlen, blen, mblen, cklen, ctlen; uint32_t seq; static char zpad[32]; m = *mp; mlen = m_length(m, &mlast); blen = ec->ec_blocklen; mblen = ec->ec_msgblocklen; cklen = ec->ec_checksumlen; if (conf_req_flag) { /* * For sealed messages, we need space for 16 bytes of * header, blen confounder, plaintext, padding, copy * of header and checksum. * * We pad to mblen (which may be different from * blen). If the encryption class is using CTS, mblen * will be one (i.e. no padding required). */ if (mblen > 1) EC = mlen % mblen; else EC = 0; ctlen = blen + mlen + EC + 16; /* * Put initial header and confounder before the * message. */ M_PREPEND(m, 16 + blen, M_WAITOK); /* * Append padding + copy of header and checksum. Try * to fit this into the end of the original message, * otherwise allocate a trailer. */ if (M_TRAILINGSPACE(mlast) >= EC + 16 + cklen) { tm = NULL; mlast->m_len += EC + 16 + cklen; } else { MGET(tm, M_WAITOK, MT_DATA); tm->m_len = EC + 16 + cklen; mlast->m_next = tm; } } else { /* * For unsealed messages, we need 16 bytes of header * plus space for the plaintext and a checksum. EC is * set to the checksum size. We leave space in tm for * a copy of the header - this will be trimmed later. */ M_PREPEND(m, 16, M_WAITOK); MGET(tm, M_WAITOK, MT_DATA); tm->m_len = cklen + 16; mlast->m_next = tm; ctlen = 0; EC = cklen; } p = m->m_data; /* TOK_ID */ p[0] = 0x05; p[1] = 0x04; /* Flags */ flags = 0; if (conf_req_flag) flags = GSS_TOKEN_SEALED; if (is_acceptor(kc)) flags |= GSS_TOKEN_SENT_BY_ACCEPTOR; if (kc->kc_more_flags & ACCEPTOR_SUBKEY) flags |= GSS_TOKEN_ACCEPTOR_SUBKEY; p[2] = flags; /* Filler */ p[3] = 0xff; /* EC + RRC - set to zero initially */ p[4] = 0; p[5] = 0; p[6] = 0; p[7] = 0; /* SND_SEQ */ p[8] = 0; p[9] = 0; p[10] = 0; p[11] = 0; seq = atomic_fetchadd_32(&kc->kc_local_seqnumber, 1); p[12] = (seq >> 24); p[13] = (seq >> 16); p[14] = (seq >> 8); p[15] = (seq >> 0); if (conf_req_flag) { /* * Encrypt according to RFC 4121 section 4.2 and RFC * 3961 section 5.3. Note: we don't generate tokens * with RRC values other than zero. If we did, we * should zero RRC in the copied header. */ arc4rand(p + 16, blen, 0); if (EC) { m_copyback(m, 16 + blen + mlen, EC, zpad); } m_copyback(m, 16 + blen + mlen + EC, 16, p); krb5_checksum(Ki, 0, m, 16, ctlen, cklen); krb5_encrypt(Ke, m, 16, ctlen, NULL, 0); } else { /* * The plaintext message is followed by a checksum of * the plaintext plus a version of the header where EC * and RRC are set to zero. Also, the original EC must * be our checksum size. 
*/ bcopy(p, tm->m_data, 16); krb5_checksum(Kc, 0, m, 16, mlen + 16, cklen); tm->m_data += 16; tm->m_len -= 16; } /* * Finally set EC to its actual value */ p[4] = EC >> 8; p[5] = EC; *mp = m; return (GSS_S_COMPLETE); } static OM_uint32 krb5_wrap(gss_ctx_id_t ctx, OM_uint32 *minor_status, int conf_req_flag, gss_qop_t qop_req, struct mbuf **mp, int *conf_state) { struct krb5_context *kc = (struct krb5_context *)ctx; *minor_status = 0; if (conf_state) *conf_state = 0; if (qop_req != GSS_C_QOP_DEFAULT) return (GSS_S_BAD_QOP); if (time_uptime > kc->kc_lifetime) return (GSS_S_CONTEXT_EXPIRED); switch (kc->kc_tokenkey->ks_class->ec_type) { case ETYPE_DES_CBC_CRC: return (krb5_wrap_old(kc, conf_req_flag, mp, conf_state, sgn_alg_des_md5, seal_alg_des)); case ETYPE_ARCFOUR_HMAC_MD5: case ETYPE_ARCFOUR_HMAC_MD5_56: return (krb5_wrap_old(kc, conf_req_flag, mp, conf_state, sgn_alg_hmac_md5, seal_alg_rc4)); case ETYPE_DES3_CBC_SHA1: return (krb5_wrap_old(kc, conf_req_flag, mp, conf_state, sgn_alg_des3_sha1, seal_alg_des3)); default: return (krb5_wrap_new(kc, conf_req_flag, mp, conf_state)); } return (GSS_S_FAILURE); } static void m_trim(struct mbuf *m, int len) { struct mbuf *n; int off; if (m == NULL) return; n = m_getptr(m, len, &off); if (n) { n->m_len = off; if (n->m_next) { m_freem(n->m_next); n->m_next = NULL; } } } static OM_uint32 krb5_unwrap_old(struct krb5_context *kc, struct mbuf **mp, int *conf_state, uint8_t sgn_alg[2], uint8_t seal_alg[2]) { OM_uint32 res; struct mbuf *m, *mlast, *hm, *cm, *n; uint8_t *p, dir; size_t tlen, elen, datalen, padlen; size_t cklen; uint8_t buf[32]; uint32_t seq; int i, conf; m = *mp; m_length(m, &mlast); tlen = token_length(kc->kc_tokenkey); cklen = kc->kc_tokenkey->ks_class->ec_checksumlen; p = krb5_verify_token("\x02\x01", tlen, &m, &elen, TRUE); *mp = m; if (!p) return (GSS_S_DEFECTIVE_TOKEN); datalen = elen - tlen; /* * Trim the framing header first to make life a little easier * later. */ m_adj(m, p - (uint8_t *) m->m_data); /* TOK_ID */ p += 2; /* SGN_ALG */ if (p[0] != sgn_alg[0] || p[1] != sgn_alg[1]) return (GSS_S_DEFECTIVE_TOKEN); p += 2; /* SEAL_ALG */ if (p[0] == seal_alg[0] && p[1] == seal_alg[1]) conf = 1; else if (p[0] == 0xff && p[1] == 0xff) conf = 0; else return (GSS_S_DEFECTIVE_TOKEN); p += 2; if (p[0] != 0xff || p[1] != 0xff) return (GSS_S_DEFECTIVE_TOKEN); p += 2; /* * SND_SEQ: * * Take the four bytes of the sequence number least * significant first (most significant for ARCFOUR) followed * by four bytes of direction marker (zero for initiator and * 0xff for acceptor). Encrypt that data using the SGN_CKSUM * as IV. */ krb5_decrypt(kc->kc_tokenkey, m, 8, 8, p + 8, 8); if (sgn_alg[0] == 0x11) { seq = p[3] | (p[2] << 8) | (p[1] << 16) | (p[0] << 24); } else { seq = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); } if (is_initiator(kc)) { dir = 0xff; } else { dir = 0; } if (p[4] != dir || p[5] != dir || p[6] != dir || p[7] != dir) return (GSS_S_DEFECTIVE_TOKEN); if (kc->kc_msg_order.km_flags & (GSS_C_REPLAY_FLAG | GSS_C_SEQUENCE_FLAG)) { res = krb5_sequence_check(kc, seq); if (GSS_ERROR(res)) return (res); } else { res = GSS_S_COMPLETE; } /* * If the token was encrypted, decode it in-place. */ if (conf) { /* * Decrypt the padded message with an IV of zero for * DES and DES3 or an IV of the big-endian encoded * sequence number for ARCFOUR. 
*/ if (seal_alg[0] == 0x10) { krb5_decrypt(kc->kc_encryptkey, m, 16 + cklen, datalen, p, 4); } else { krb5_decrypt(kc->kc_encryptkey, m, 16 + cklen, datalen, NULL, 0); } } if (conf_state) *conf_state = conf; /* * Check the trailing pad bytes. * RFC1964 specifies between 1<->8 bytes, each with a binary value * equal to the number of bytes. */ if (mlast->m_len > 0) padlen = mlast->m_data[mlast->m_len - 1]; else { n = m_getptr(m, tlen + datalen - 1, &i); /* * When the position is exactly equal to the # of data bytes * in the mbuf list, m_getptr() will return the last mbuf in * the list and an off == m_len for that mbuf, so that case * needs to be checked as well as a NULL return. */ if (n == NULL || n->m_len == i) return (GSS_S_DEFECTIVE_TOKEN); padlen = n->m_data[i]; } if (padlen < 1 || padlen > 8 || padlen > tlen + datalen) return (GSS_S_DEFECTIVE_TOKEN); m_copydata(m, tlen + datalen - padlen, padlen, buf); for (i = 0; i < padlen; i++) { if (buf[i] != padlen) { return (GSS_S_DEFECTIVE_TOKEN); } } /* * SGN_CKSUM: * * Calculate the keyed checksum of the token header plus the * padded message. We do a little mbuf surgery to trim out the * parts we don't want to checksum. */ hm = m; *mp = m = m_split(m, 16 + cklen, M_WAITOK); mlast = m_last(m); hm->m_len = 8; hm->m_next = m; MGET(cm, M_WAITOK, MT_DATA); cm->m_len = cklen; mlast->m_next = cm; krb5_checksum(kc->kc_checksumkey, 13, hm, 0, datalen + 8, cklen); hm->m_next = NULL; mlast->m_next = NULL; if (bcmp(cm->m_data, hm->m_data + 16, cklen)) { m_freem(hm); m_free(cm); return (GSS_S_BAD_SIG); } m_freem(hm); m_free(cm); /* * Trim off the confounder and padding. */ m_adj(m, 8); if (mlast->m_len >= padlen) { mlast->m_len -= padlen; } else { m_trim(m, datalen - 8 - padlen); } *mp = m; return (res); } static OM_uint32 krb5_unwrap_new(struct krb5_context *kc, struct mbuf **mp, int *conf_state) { OM_uint32 res; struct krb5_key_state *Ke = kc->kc_recv_seal_Ke; struct krb5_key_state *Ki = kc->kc_recv_seal_Ki; struct krb5_key_state *Kc = kc->kc_recv_seal_Kc; const struct krb5_encryption_class *ec = Ke->ks_class; struct mbuf *m, *mlast, *hm, *cm; uint8_t *p; int sealed, flags, EC, RRC; size_t blen, cklen, ctlen, mlen, plen, tlen; char buf[32], buf2[32]; m = *mp; mlen = m_length(m, &mlast); if (mlen <= 16) return (GSS_S_DEFECTIVE_TOKEN); if (m->m_len < 16) { m = m_pullup(m, 16); *mp = m; } p = m->m_data; /* TOK_ID */ if (p[0] != 0x05) return (GSS_S_DEFECTIVE_TOKEN); if (p[1] != 0x04) return (GSS_S_DEFECTIVE_TOKEN); /* Flags */ sealed = p[2] & GSS_TOKEN_SEALED; flags = sealed; if (is_initiator(kc)) flags |= GSS_TOKEN_SENT_BY_ACCEPTOR; if (kc->kc_more_flags & ACCEPTOR_SUBKEY) flags |= GSS_TOKEN_ACCEPTOR_SUBKEY; if (p[2] != flags) return (GSS_S_DEFECTIVE_TOKEN); /* Filler */ if (p[3] != 0xff) return (GSS_S_DEFECTIVE_TOKEN); /* EC + RRC */ EC = (p[4] << 8) + p[5]; RRC = (p[6] << 8) + p[7]; /* SND_SEQ */ if (kc->kc_msg_order.km_flags & (GSS_C_REPLAY_FLAG | GSS_C_SEQUENCE_FLAG)) { uint32_t seq; if (p[8] || p[9] || p[10] || p[11]) { res = GSS_S_UNSEQ_TOKEN; } else { seq = (p[12] << 24) | (p[13] << 16) | (p[14] << 8) | p[15]; res = krb5_sequence_check(kc, seq); } if (GSS_ERROR(res)) return (res); } else { res = GSS_S_COMPLETE; } /* * Separate the header before dealing with RRC. We only need * to keep the header if the message isn't encrypted. */ if (sealed) { hm = NULL; m_adj(m, 16); } else { hm = m; *mp = m = m_split(m, 16, M_WAITOK); mlast = m_last(m); } /* * Undo the effects of RRC by rotating left. 
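RRC is RFC 4121's "right rotation count": the sender may rotate the encrypted payload right by RRC bytes, and the receiver undoes it by rotating left. A sketch of the simple contiguous-buffer case handled by the first branch below (illustrative name; tmp is assumed to hold at least rrc bytes):

#include <stddef.h>
#include <string.h>

/* Rotate buf[0..len) left by rrc bytes, undoing the sender's right rotation. */
static void
unrotate_rrc(unsigned char *buf, size_t len, size_t rrc, unsigned char *tmp)
{
	if (len == 0 || (rrc %= len) == 0)
		return;
	memcpy(tmp, buf, rrc);			/* save the rotated-in prefix */
	memmove(buf, buf + rrc, len - rrc);	/* slide the remainder forward */
	memcpy(buf + len - rrc, tmp, rrc);	/* re-append the prefix at the end */
}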
*/ if (RRC > 0) { struct mbuf *rm; size_t rlen; rlen = mlen - 16; if (RRC <= sizeof(buf) && m->m_len >= rlen) { /* * Simple case, just rearrange the bytes in m. */ bcopy(m->m_data, buf, RRC); bcopy(m->m_data + RRC, m->m_data, rlen - RRC); bcopy(buf, m->m_data + rlen - RRC, RRC); } else { /* * More complicated - rearrange the mbuf * chain. */ rm = m; *mp = m = m_split(m, RRC, M_WAITOK); m_cat(m, rm); mlast = rm; } } blen = ec->ec_blocklen; cklen = ec->ec_checksumlen; if (sealed) { /* * Decrypt according to RFC 4121 section 4.2 and RFC * 3961 section 5.3. The message must be large enough * for a blocksize confounder, at least one block of * cyphertext and a checksum. */ if (mlen < 16 + 2*blen + cklen) return (GSS_S_DEFECTIVE_TOKEN); ctlen = mlen - 16 - cklen; krb5_decrypt(Ke, m, 0, ctlen, NULL, 0); /* * The size of the plaintext is ctlen minus blocklen * (for the confounder), 16 (for the copy of the token * header) and EC (for the filler). The actual * plaintext starts after the confounder. */ plen = ctlen - blen - 16 - EC; /* * Checksum the padded plaintext. */ m_copydata(m, ctlen, cklen, buf); krb5_checksum(Ki, 0, m, 0, ctlen, cklen); m_copydata(m, ctlen, cklen, buf2); if (bcmp(buf, buf2, cklen)) return (GSS_S_BAD_SIG); /* * Trim the message back to just plaintext. */ m_adj(m, blen); tlen = 16 + EC + cklen; if (mlast->m_len >= tlen) { mlast->m_len -= tlen; } else { m_trim(m, plen); } } else { /* * The plaintext message is followed by a checksum of * the plaintext plus a version of the header where EC * and RRC are set to zero. Also, the original EC must * be our checksum size. */ if (mlen < 16 + cklen || EC != cklen) return (GSS_S_DEFECTIVE_TOKEN); /* * The size of the plaintext is simply the message * size less header and checksum. The plaintext starts * right after the header (which we have saved in hm). */ plen = mlen - 16 - cklen; /* * Insert a copy of the header (with EC and RRC set to * zero) between the plaintext message and the * checksum. */ p = hm->m_data; p[4] = p[5] = p[6] = p[7] = 0; cm = m_split(m, plen, M_WAITOK); mlast = m_last(m); m->m_next = hm; hm->m_next = cm; bcopy(cm->m_data, buf, cklen); krb5_checksum(Kc, 0, m, 0, plen + 16, cklen); if (bcmp(cm->m_data, buf, cklen)) return (GSS_S_BAD_SIG); /* * The checksum matches, discard all buf the plaintext. 
*/ mlast->m_next = NULL; m_freem(hm); } if (conf_state) *conf_state = (sealed != 0); return (res); } static OM_uint32 krb5_unwrap(gss_ctx_id_t ctx, OM_uint32 *minor_status, struct mbuf **mp, int *conf_state, gss_qop_t *qop_state) { struct krb5_context *kc = (struct krb5_context *)ctx; OM_uint32 maj_stat; *minor_status = 0; if (qop_state) *qop_state = GSS_C_QOP_DEFAULT; if (conf_state) *conf_state = 0; if (time_uptime > kc->kc_lifetime) return (GSS_S_CONTEXT_EXPIRED); switch (kc->kc_tokenkey->ks_class->ec_type) { case ETYPE_DES_CBC_CRC: maj_stat = krb5_unwrap_old(kc, mp, conf_state, sgn_alg_des_md5, seal_alg_des); break; case ETYPE_ARCFOUR_HMAC_MD5: case ETYPE_ARCFOUR_HMAC_MD5_56: maj_stat = krb5_unwrap_old(kc, mp, conf_state, sgn_alg_hmac_md5, seal_alg_rc4); break; case ETYPE_DES3_CBC_SHA1: maj_stat = krb5_unwrap_old(kc, mp, conf_state, sgn_alg_des3_sha1, seal_alg_des3); break; default: maj_stat = krb5_unwrap_new(kc, mp, conf_state); break; } if (GSS_ERROR(maj_stat)) { m_freem(*mp); *mp = NULL; } return (maj_stat); } static OM_uint32 krb5_wrap_size_limit(gss_ctx_id_t ctx, OM_uint32 *minor_status, int conf_req_flag, gss_qop_t qop_req, OM_uint32 req_output_size, OM_uint32 *max_input_size) { struct krb5_context *kc = (struct krb5_context *)ctx; const struct krb5_encryption_class *ec; OM_uint32 overhead; *minor_status = 0; *max_input_size = 0; if (qop_req != GSS_C_QOP_DEFAULT) return (GSS_S_BAD_QOP); ec = kc->kc_tokenkey->ks_class; switch (ec->ec_type) { case ETYPE_DES_CBC_CRC: case ETYPE_DES3_CBC_SHA1: case ETYPE_ARCFOUR_HMAC_MD5: case ETYPE_ARCFOUR_HMAC_MD5_56: /* * up to 5 bytes for [APPLICATION 0] SEQUENCE * 2 + krb5 oid length * 8 bytes of header * 8 bytes of confounder * maximum of 8 bytes of padding * checksum */ overhead = 5 + 2 + krb5_mech_oid.length; overhead += 8 + 8 + ec->ec_msgblocklen; overhead += ec->ec_checksumlen; break; default: if (conf_req_flag) { /* * 16 byts of header * blocklen bytes of confounder * up to msgblocklen - 1 bytes of padding * 16 bytes for copy of header * checksum */ overhead = 16 + ec->ec_blocklen; overhead += ec->ec_msgblocklen - 1; overhead += 16; overhead += ec->ec_checksumlen; } else { /* * 16 bytes of header plus checksum. 
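Either way, gss_wrap_size_limit() works backwards: the per-token overhead is subtracted from the caller's maximum token size to find the largest plaintext that still fits. A rough sketch for the sealed RFC 4121 case, using the overhead terms listed above (the parameters stand in for the encryption-class fields):

#include <stdint.h>

/* Largest plaintext that wraps into at most max_token bytes (sealed case). */
static uint32_t
cfx_max_input(uint32_t max_token, uint32_t blocklen, uint32_t msgblocklen,
    uint32_t checksumlen)
{
	uint32_t overhead;

	overhead = 16 + blocklen;		/* token header + confounder */
	overhead += msgblocklen - 1;		/* worst-case padding */
	overhead += 16 + checksumlen;		/* trailing header copy + checksum */

	return (max_token > overhead ? max_token - overhead : 0);
}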
*/ overhead = 16 + ec->ec_checksumlen; } } *max_input_size = req_output_size - overhead; return (GSS_S_COMPLETE); } static kobj_method_t krb5_methods[] = { KOBJMETHOD(kgss_init, krb5_init), KOBJMETHOD(kgss_import, krb5_import), KOBJMETHOD(kgss_delete, krb5_delete), KOBJMETHOD(kgss_mech_type, krb5_mech_type), KOBJMETHOD(kgss_get_mic, krb5_get_mic), KOBJMETHOD(kgss_verify_mic, krb5_verify_mic), KOBJMETHOD(kgss_wrap, krb5_wrap), KOBJMETHOD(kgss_unwrap, krb5_unwrap), KOBJMETHOD(kgss_wrap_size_limit, krb5_wrap_size_limit), { 0, 0 } }; static struct kobj_class krb5_class = { "kerberosv5", krb5_methods, sizeof(struct krb5_context) }; /* * Kernel module glue */ static int kgssapi_krb5_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: kgss_install_mech(&krb5_mech_oid, "kerberosv5", &krb5_class); break; case MOD_UNLOAD: kgss_uninstall_mech(&krb5_mech_oid); break; } return (0); } static moduledata_t kgssapi_krb5_mod = { "kgssapi_krb5", kgssapi_krb5_modevent, NULL, }; DECLARE_MODULE(kgssapi_krb5, kgssapi_krb5_mod, SI_SUB_VFS, SI_ORDER_ANY); MODULE_DEPEND(kgssapi_krb5, kgssapi, 1, 1, 1); MODULE_DEPEND(kgssapi_krb5, crypto, 1, 1, 1); MODULE_DEPEND(kgssapi_krb5, rc4, 1, 1, 1); MODULE_VERSION(kgssapi_krb5, 1); diff --git a/sys/rpc/rpcsec_gss/rpcsec_gss.c b/sys/rpc/rpcsec_gss/rpcsec_gss.c index 983dd251f81f..53770d139c61 100644 --- a/sys/rpc/rpcsec_gss/rpcsec_gss.c +++ b/sys/rpc/rpcsec_gss/rpcsec_gss.c @@ -1,1191 +1,1215 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2008 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* auth_gss.c RPCSEC_GSS client routines. Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. $Id: auth_gss.c,v 1.32 2002/01/15 15:43:00 andros Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rpcsec_gss_int.h" static void rpc_gss_nextverf(AUTH*); static bool_t rpc_gss_marshal(AUTH *, uint32_t, XDR *, struct mbuf *); static bool_t rpc_gss_init(AUTH *auth, rpc_gss_options_ret_t *options_ret); static bool_t rpc_gss_refresh(AUTH *, void *); static bool_t rpc_gss_validate(AUTH *, uint32_t, struct opaque_auth *, struct mbuf **); static void rpc_gss_destroy(AUTH *); static void rpc_gss_destroy_context(AUTH *, bool_t); static const struct auth_ops rpc_gss_ops = { .ah_nextverf = rpc_gss_nextverf, .ah_marshal = rpc_gss_marshal, .ah_validate = rpc_gss_validate, .ah_refresh = rpc_gss_refresh, .ah_destroy = rpc_gss_destroy, }; enum rpcsec_gss_state { RPCSEC_GSS_START, RPCSEC_GSS_CONTEXT, RPCSEC_GSS_ESTABLISHED, RPCSEC_GSS_DESTROYING }; struct rpc_pending_request { uint32_t pr_xid; /* XID of rpc */ uint32_t pr_seq; /* matching GSS seq */ LIST_ENTRY(rpc_pending_request) pr_link; }; LIST_HEAD(rpc_pending_request_list, rpc_pending_request); struct rpc_gss_data { volatile u_int gd_refs; /* number of current users */ struct mtx gd_lock; uint32_t gd_hash; AUTH *gd_auth; /* link back to AUTH */ struct ucred *gd_ucred; /* matching local cred */ char *gd_principal; /* server principal name */ char *gd_clntprincipal; /* client principal name */ rpc_gss_options_req_t gd_options; /* GSS context options */ enum rpcsec_gss_state gd_state; /* connection state */ gss_buffer_desc gd_verf; /* save GSS_S_COMPLETE * NULL RPC verfier to * process at end of * context negotiation */ CLIENT *gd_clnt; /* client handle */ gss_OID gd_mech; /* mechanism to use */ gss_qop_t gd_qop; /* quality of protection */ gss_ctx_id_t gd_ctx; /* context id */ struct rpc_gss_cred gd_cred; /* client credentials */ uint32_t gd_seq; /* next sequence number */ u_int gd_win; /* sequence window */ struct rpc_pending_request_list gd_reqs; TAILQ_ENTRY(rpc_gss_data) gd_link; TAILQ_ENTRY(rpc_gss_data) gd_alllink; }; TAILQ_HEAD(rpc_gss_data_list, rpc_gss_data); #define AUTH_PRIVATE(auth) ((struct rpc_gss_data *)auth->ah_private) static struct timeval AUTH_TIMEOUT = { 25, 0 }; #define RPC_GSS_HASH_SIZE 11 #define RPC_GSS_MAX 256 static struct rpc_gss_data_list rpc_gss_cache[RPC_GSS_HASH_SIZE]; static struct rpc_gss_data_list rpc_gss_all; static struct sx rpc_gss_lock; static int rpc_gss_count; static AUTH *rpc_gss_seccreate_int(CLIENT *, struct ucred *, const char *, const char *, gss_OID, rpc_gss_service_t, u_int, rpc_gss_options_req_t *, 
rpc_gss_options_ret_t *); static void rpc_gss_hashinit(void *dummy) { int i; for (i = 0; i < RPC_GSS_HASH_SIZE; i++) TAILQ_INIT(&rpc_gss_cache[i]); TAILQ_INIT(&rpc_gss_all); sx_init(&rpc_gss_lock, "rpc_gss_lock"); } SYSINIT(rpc_gss_hashinit, SI_SUB_KMEM, SI_ORDER_ANY, rpc_gss_hashinit, NULL); static uint32_t rpc_gss_hash(const char *principal, gss_OID mech, struct ucred *cred, rpc_gss_service_t service) { uint32_t h; h = HASHSTEP(HASHINIT, cred->cr_uid); h = hash32_str(principal, h); h = hash32_buf(mech->elements, mech->length, h); h = HASHSTEP(h, (int) service); return (h % RPC_GSS_HASH_SIZE); } /* * Simplified interface to create a security association for the * current thread's * ucred. */ AUTH * rpc_gss_secfind(CLIENT *clnt, struct ucred *cred, const char *principal, gss_OID mech_oid, rpc_gss_service_t service) { uint32_t h, th; AUTH *auth; struct rpc_gss_data *gd, *tgd; rpc_gss_options_ret_t options; if (rpc_gss_count > RPC_GSS_MAX) { while (rpc_gss_count > RPC_GSS_MAX) { sx_xlock(&rpc_gss_lock); tgd = TAILQ_FIRST(&rpc_gss_all); th = tgd->gd_hash; TAILQ_REMOVE(&rpc_gss_cache[th], tgd, gd_link); TAILQ_REMOVE(&rpc_gss_all, tgd, gd_alllink); rpc_gss_count--; sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(tgd->gd_auth); } } /* * See if we already have an AUTH which matches. */ h = rpc_gss_hash(principal, mech_oid, cred, service); again: sx_slock(&rpc_gss_lock); TAILQ_FOREACH(gd, &rpc_gss_cache[h], gd_link) { if (gd->gd_ucred->cr_uid == cred->cr_uid && !strcmp(gd->gd_principal, principal) && gd->gd_mech == mech_oid && gd->gd_cred.gc_svc == service) { refcount_acquire(&gd->gd_refs); if (sx_try_upgrade(&rpc_gss_lock)) { /* * Keep rpc_gss_all LRU sorted. */ TAILQ_REMOVE(&rpc_gss_all, gd, gd_alllink); TAILQ_INSERT_TAIL(&rpc_gss_all, gd, gd_alllink); sx_xunlock(&rpc_gss_lock); } else { sx_sunlock(&rpc_gss_lock); } /* * If the state != ESTABLISHED, try and initialize * the authenticator again. This will happen if the * user's credentials have expired. It may succeed now, * if they have done a kinit or similar. */ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { memset(&options, 0, sizeof (options)); (void) rpc_gss_init(gd->gd_auth, &options); } return (gd->gd_auth); } } sx_sunlock(&rpc_gss_lock); /* * We missed in the cache - create a new association. */ auth = rpc_gss_seccreate_int(clnt, cred, NULL, principal, mech_oid, service, GSS_C_QOP_DEFAULT, NULL, NULL); if (!auth) return (NULL); gd = AUTH_PRIVATE(auth); gd->gd_hash = h; sx_xlock(&rpc_gss_lock); TAILQ_FOREACH(tgd, &rpc_gss_cache[h], gd_link) { if (tgd->gd_ucred->cr_uid == cred->cr_uid && !strcmp(tgd->gd_principal, principal) && tgd->gd_mech == mech_oid && tgd->gd_cred.gc_svc == service) { /* * We lost a race to create the AUTH that * matches this cred. 
*/ sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(auth); goto again; } } rpc_gss_count++; TAILQ_INSERT_TAIL(&rpc_gss_cache[h], gd, gd_link); TAILQ_INSERT_TAIL(&rpc_gss_all, gd, gd_alllink); refcount_acquire(&gd->gd_refs); /* one for the cache, one for user */ sx_xunlock(&rpc_gss_lock); return (auth); } void rpc_gss_secpurge(CLIENT *clnt) { uint32_t h; struct rpc_gss_data *gd, *tgd; TAILQ_FOREACH_SAFE(gd, &rpc_gss_all, gd_alllink, tgd) { if (gd->gd_clnt == clnt) { sx_xlock(&rpc_gss_lock); h = gd->gd_hash; TAILQ_REMOVE(&rpc_gss_cache[h], gd, gd_link); TAILQ_REMOVE(&rpc_gss_all, gd, gd_alllink); rpc_gss_count--; sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(gd->gd_auth); } } } AUTH * rpc_gss_seccreate(CLIENT *clnt, struct ucred *cred, const char *clnt_principal, const char *principal, const char *mechanism, rpc_gss_service_t service, const char *qop, rpc_gss_options_req_t *options_req, rpc_gss_options_ret_t *options_ret) { gss_OID oid; u_int qop_num; /* * Bail out now if we don't know this mechanism. */ if (!rpc_gss_mech_to_oid(mechanism, &oid)) return (NULL); if (qop) { if (!rpc_gss_qop_to_num(qop, mechanism, &qop_num)) return (NULL); } else { qop_num = GSS_C_QOP_DEFAULT; } return (rpc_gss_seccreate_int(clnt, cred, clnt_principal, principal, oid, service, qop_num, options_req, options_ret)); } void rpc_gss_refresh_auth(AUTH *auth) { struct rpc_gss_data *gd; rpc_gss_options_ret_t options; gd = AUTH_PRIVATE(auth); /* * If the state != ESTABLISHED, try and initialize * the authenticator again. This will happen if the * user's credentials have expired. It may succeed now, * if they have done a kinit or similar. */ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { memset(&options, 0, sizeof (options)); (void) rpc_gss_init(auth, &options); } } static AUTH * rpc_gss_seccreate_int(CLIENT *clnt, struct ucred *cred, const char *clnt_principal, const char *principal, gss_OID mech_oid, rpc_gss_service_t service, u_int qop_num, rpc_gss_options_req_t *options_req, rpc_gss_options_ret_t *options_ret) { AUTH *auth; rpc_gss_options_ret_t options; struct rpc_gss_data *gd; /* * If the caller doesn't want the options, point at local * storage to simplify the code below. */ if (!options_ret) options_ret = &options; /* * Default service is integrity. 
*/ if (service == rpc_gss_svc_default) service = rpc_gss_svc_integrity; memset(options_ret, 0, sizeof(*options_ret)); rpc_gss_log_debug("in rpc_gss_seccreate()"); memset(&rpc_createerr, 0, sizeof(rpc_createerr)); auth = mem_alloc(sizeof(*auth)); if (auth == NULL) { rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = ENOMEM; return (NULL); } gd = mem_alloc(sizeof(*gd)); if (gd == NULL) { rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = ENOMEM; mem_free(auth, sizeof(*auth)); return (NULL); } auth->ah_ops = &rpc_gss_ops; auth->ah_private = (caddr_t) gd; auth->ah_cred.oa_flavor = RPCSEC_GSS; refcount_init(&gd->gd_refs, 1); mtx_init(&gd->gd_lock, "gd->gd_lock", NULL, MTX_DEF); gd->gd_auth = auth; gd->gd_ucred = crdup(cred); gd->gd_principal = strdup(principal, M_RPC); if (clnt_principal != NULL) gd->gd_clntprincipal = strdup(clnt_principal, M_RPC); else gd->gd_clntprincipal = NULL; if (options_req) { gd->gd_options = *options_req; } else { gd->gd_options.req_flags = GSS_C_MUTUAL_FLAG; gd->gd_options.time_req = 0; gd->gd_options.my_cred = GSS_C_NO_CREDENTIAL; gd->gd_options.input_channel_bindings = NULL; } CLNT_ACQUIRE(clnt); gd->gd_clnt = clnt; gd->gd_ctx = GSS_C_NO_CONTEXT; gd->gd_mech = mech_oid; gd->gd_qop = qop_num; gd->gd_cred.gc_version = RPCSEC_GSS_VERSION; gd->gd_cred.gc_proc = RPCSEC_GSS_INIT; gd->gd_cred.gc_seq = 0; gd->gd_cred.gc_svc = service; LIST_INIT(&gd->gd_reqs); if (!rpc_gss_init(auth, options_ret)) { goto bad; } return (auth); bad: AUTH_DESTROY(auth); return (NULL); } bool_t rpc_gss_set_defaults(AUTH *auth, rpc_gss_service_t service, const char *qop) { struct rpc_gss_data *gd; u_int qop_num; const char *mechanism; gd = AUTH_PRIVATE(auth); if (!rpc_gss_oid_to_mech(gd->gd_mech, &mechanism)) { return (FALSE); } if (qop) { if (!rpc_gss_qop_to_num(qop, mechanism, &qop_num)) { return (FALSE); } } else { qop_num = GSS_C_QOP_DEFAULT; } gd->gd_cred.gc_svc = service; gd->gd_qop = qop_num; return (TRUE); } static void rpc_gss_purge_xid(struct rpc_gss_data *gd, uint32_t xid) { struct rpc_pending_request *pr, *npr; struct rpc_pending_request_list reqs; LIST_INIT(&reqs); mtx_lock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &gd->gd_reqs, pr_link, npr) { if (pr->pr_xid == xid) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&reqs, pr, pr_link); } } mtx_unlock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) { mem_free(pr, sizeof(*pr)); } } static uint32_t rpc_gss_alloc_seq(struct rpc_gss_data *gd) { uint32_t seq; mtx_lock(&gd->gd_lock); seq = gd->gd_seq; gd->gd_seq++; mtx_unlock(&gd->gd_lock); return (seq); } static void rpc_gss_nextverf(__unused AUTH *auth) { /* not used */ } static bool_t rpc_gss_marshal(AUTH *auth, uint32_t xid, XDR *xdrs, struct mbuf *args) { struct rpc_gss_data *gd; struct rpc_pending_request *pr; uint32_t seq; XDR tmpxdrs; struct rpc_gss_cred gsscred; char credbuf[MAX_AUTH_BYTES]; struct opaque_auth creds, verf; gss_buffer_desc rpcbuf, checksum; OM_uint32 maj_stat, min_stat; bool_t xdr_stat; rpc_gss_log_debug("in rpc_gss_marshal()"); gd = AUTH_PRIVATE(auth); gsscred = gd->gd_cred; seq = rpc_gss_alloc_seq(gd); gsscred.gc_seq = seq; xdrmem_create(&tmpxdrs, credbuf, sizeof(credbuf), XDR_ENCODE); if (!xdr_rpc_gss_cred(&tmpxdrs, &gsscred)) { XDR_DESTROY(&tmpxdrs); _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } creds.oa_flavor = RPCSEC_GSS; creds.oa_base = credbuf; creds.oa_length = XDR_GETPOS(&tmpxdrs); XDR_DESTROY(&tmpxdrs); xdr_opaque_auth(xdrs, &creds); if (gd->gd_cred.gc_proc == RPCSEC_GSS_INIT || 
gd->gd_cred.gc_proc == RPCSEC_GSS_CONTINUE_INIT) { if (!xdr_opaque_auth(xdrs, &_null_auth)) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } return (xdr_putmbuf(xdrs, args)); } else { /* * Keep track of this XID + seq pair so that we can do * the matching gss_verify_mic in AUTH_VALIDATE. */ pr = mem_alloc(sizeof(struct rpc_pending_request)); mtx_lock(&gd->gd_lock); pr->pr_xid = xid; pr->pr_seq = seq; LIST_INSERT_HEAD(&gd->gd_reqs, pr, pr_link); mtx_unlock(&gd->gd_lock); /* * Checksum serialized RPC header, up to and including * credential. For the in-kernel environment, we * assume that our XDR stream is on a contiguous * memory buffer (e.g. an mbuf). */ rpcbuf.length = XDR_GETPOS(xdrs); XDR_SETPOS(xdrs, 0); rpcbuf.value = XDR_INLINE(xdrs, rpcbuf.length); maj_stat = gss_get_mic(&min_stat, gd->gd_ctx, gd->gd_qop, &rpcbuf, &checksum); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_get_mic", gd->gd_mech, maj_stat, min_stat); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); } _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); return (FALSE); } verf.oa_flavor = RPCSEC_GSS; verf.oa_base = checksum.value; verf.oa_length = checksum.length; xdr_stat = xdr_opaque_auth(xdrs, &verf); gss_release_buffer(&min_stat, &checksum); if (!xdr_stat) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } if (gd->gd_state != RPCSEC_GSS_ESTABLISHED || gd->gd_cred.gc_svc == rpc_gss_svc_none) { return (xdr_putmbuf(xdrs, args)); } else { if (!xdr_rpc_gss_wrap_data(&args, gd->gd_ctx, gd->gd_qop, gd->gd_cred.gc_svc, seq)) return (FALSE); return (xdr_putmbuf(xdrs, args)); } } return (TRUE); } static bool_t rpc_gss_validate(AUTH *auth, uint32_t xid, struct opaque_auth *verf, struct mbuf **resultsp) { struct rpc_gss_data *gd; struct rpc_pending_request *pr, *npr; struct rpc_pending_request_list reqs; gss_qop_t qop_state; uint32_t num, seq; gss_buffer_desc signbuf, checksum; OM_uint32 maj_stat, min_stat; rpc_gss_log_debug("in rpc_gss_validate()"); gd = AUTH_PRIVATE(auth); /* * The client will call us with a NULL verf when it gives up * on an XID. */ if (!verf) { rpc_gss_purge_xid(gd, xid); return (TRUE); } if (gd->gd_state == RPCSEC_GSS_CONTEXT) { /* * Save the on the wire verifier to validate last INIT * phase packet after decode if the major status is * GSS_S_COMPLETE. */ if (gd->gd_verf.value) xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); gd->gd_verf.value = mem_alloc(verf->oa_length); if (gd->gd_verf.value == NULL) { printf("gss_validate: out of memory\n"); _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); m_freem(*resultsp); *resultsp = NULL; return (FALSE); } memcpy(gd->gd_verf.value, verf->oa_base, verf->oa_length); gd->gd_verf.length = verf->oa_length; return (TRUE); } /* * We need to check the verifier against all the requests * we've send for this XID - for unreliable protocols, we * retransmit with the same XID but different sequence * number. We temporarily take this set of requests out of the * list so that we can work through the list without having to * hold the lock. 
*/ mtx_lock(&gd->gd_lock); LIST_INIT(&reqs); LIST_FOREACH_SAFE(pr, &gd->gd_reqs, pr_link, npr) { if (pr->pr_xid == xid) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&reqs, pr, pr_link); } } mtx_unlock(&gd->gd_lock); LIST_FOREACH(pr, &reqs, pr_link) { if (pr->pr_xid == xid) { seq = pr->pr_seq; num = htonl(seq); signbuf.value = &num; signbuf.length = sizeof(num); checksum.value = verf->oa_base; checksum.length = verf->oa_length; maj_stat = gss_verify_mic(&min_stat, gd->gd_ctx, &signbuf, &checksum, &qop_state); if (maj_stat != GSS_S_COMPLETE || qop_state != gd->gd_qop) { continue; } if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); break; } //rpc_gss_purge_reqs(gd, seq); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) mem_free(pr, sizeof(*pr)); if (gd->gd_cred.gc_svc == rpc_gss_svc_none) { return (TRUE); } else { if (!xdr_rpc_gss_unwrap_data(resultsp, gd->gd_ctx, gd->gd_qop, gd->gd_cred.gc_svc, seq)) { return (FALSE); } } return (TRUE); } } /* * We didn't match - put back any entries for this XID so that * a future call to validate can retry. */ mtx_lock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&gd->gd_reqs, pr, pr_link); } mtx_unlock(&gd->gd_lock); /* * Nothing matches - give up. */ _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); m_freem(*resultsp); *resultsp = NULL; return (FALSE); } static bool_t rpc_gss_init(AUTH *auth, rpc_gss_options_ret_t *options_ret) { struct thread *td = curthread; struct ucred *crsave; struct rpc_gss_data *gd; struct rpc_gss_init_res gr; gss_buffer_desc principal_desc; gss_buffer_desc *recv_tokenp, recv_token, send_token; gss_name_t name; OM_uint32 maj_stat, min_stat, call_stat; const char *mech; struct rpc_callextra ext; gss_OID mech_oid; gss_OID_set mechlist; + static enum krb_imp my_krb_imp = KRBIMP_UNKNOWN; rpc_gss_log_debug("in rpc_gss_refresh()"); gd = AUTH_PRIVATE(auth); mtx_lock(&gd->gd_lock); /* * If the context isn't in START state, someone else is * refreshing - we wait till they are done. If they fail, they * will put the state back to START and we can try (most * likely to also fail). */ while (gd->gd_state != RPCSEC_GSS_START && gd->gd_state != RPCSEC_GSS_ESTABLISHED) { msleep(gd, &gd->gd_lock, 0, "gssstate", 0); } if (gd->gd_state == RPCSEC_GSS_ESTABLISHED) { mtx_unlock(&gd->gd_lock); return (TRUE); } gd->gd_state = RPCSEC_GSS_CONTEXT; mtx_unlock(&gd->gd_lock); gd->gd_cred.gc_proc = RPCSEC_GSS_INIT; gd->gd_cred.gc_seq = 0; /* * XXX Threads from inside jails can get here via calls * to clnt_vc_call()->AUTH_REFRESH()->rpc_gss_refresh() * but the NFS mount is always done outside of the * jails in vnet0. Since the thread credentials won't * necessarily have cr_prison == vnet0 and this function * has no access to the socket, using vnet0 seems the * only option. This is broken if NFS mounts are enabled * within vnet prisons. */ KGSS_CURVNET_SET_QUIET(vnet0); /* * For KerberosV, if there is a client principal name, that implies * that this is a host based initiator credential in the default * keytab file. For this case, it is necessary to do a * gss_acquire_cred(). When this is done, the gssd daemon will * do the equivalent of "kinit -k" to put a TGT for the name in * the credential cache file for the gssd daemon. */ if (gd->gd_clntprincipal != NULL && rpc_gss_mech_to_oid("kerberosv5", &mech_oid) && gd->gd_mech == mech_oid) { /* Get rid of any old credential. 
*/ if (gd->gd_options.my_cred != GSS_C_NO_CREDENTIAL) { gss_release_cred(&min_stat, &gd->gd_options.my_cred); gd->gd_options.my_cred = GSS_C_NO_CREDENTIAL; } /* * The mechanism must be set to KerberosV for acquisition * of credentials to work reliably. */ maj_stat = gss_create_empty_oid_set(&min_stat, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } maj_stat = gss_add_oid_set_member(&min_stat, gd->gd_mech, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; gss_release_oid_set(&min_stat, &mechlist); goto out; } principal_desc.value = (void *)gd->gd_clntprincipal; principal_desc.length = strlen(gd->gd_clntprincipal); maj_stat = gss_import_name(&min_stat, &principal_desc, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; gss_release_oid_set(&min_stat, &mechlist); goto out; } /* Acquire the credentials. */ maj_stat = gss_acquire_cred(&min_stat, name, 0, mechlist, GSS_C_INITIATE, &gd->gd_options.my_cred, NULL, NULL); gss_release_name(&min_stat, &name); gss_release_oid_set(&min_stat, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } } principal_desc.value = (void *)gd->gd_principal; principal_desc.length = strlen(gd->gd_principal); maj_stat = gss_import_name(&min_stat, &principal_desc, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } + if (my_krb_imp == KRBIMP_UNKNOWN) { + maj_stat = gss_supports_lucid(&min_stat, NULL); + if (maj_stat == GSS_S_COMPLETE) + my_krb_imp = KRBIMP_MIT; + else + my_krb_imp = KRBIMP_HESIOD1; + } + /* GSS context establishment loop. */ memset(&recv_token, 0, sizeof(recv_token)); memset(&gr, 0, sizeof(gr)); memset(options_ret, 0, sizeof(*options_ret)); options_ret->major_status = GSS_S_FAILURE; recv_tokenp = GSS_C_NO_BUFFER; for (;;) { crsave = td->td_ucred; td->td_ucred = gd->gd_ucred; - maj_stat = gss_init_sec_context(&min_stat, - gd->gd_options.my_cred, - &gd->gd_ctx, - name, - gd->gd_mech, - gd->gd_options.req_flags, - gd->gd_options.time_req, - gd->gd_options.input_channel_bindings, - recv_tokenp, - &gd->gd_mech, /* used mech */ - &send_token, - &options_ret->ret_flags, - &options_ret->time_req); + if (my_krb_imp == KRBIMP_MIT) + maj_stat = gss_init_sec_context_lucid_v1(&min_stat, + gd->gd_options.my_cred, + &gd->gd_ctx, + name, + gd->gd_mech, + gd->gd_options.req_flags, + gd->gd_options.time_req, + gd->gd_options.input_channel_bindings, + recv_tokenp, + &gd->gd_mech, /* used mech */ + &send_token, + &options_ret->ret_flags, + &options_ret->time_req); + else + maj_stat = gss_init_sec_context(&min_stat, + gd->gd_options.my_cred, + &gd->gd_ctx, + name, + gd->gd_mech, + gd->gd_options.req_flags, + gd->gd_options.time_req, + gd->gd_options.input_channel_bindings, + recv_tokenp, + &gd->gd_mech, /* used mech */ + &send_token, + &options_ret->ret_flags, + &options_ret->time_req); td->td_ucred = crsave; /* * Free the token which we got from the server (if * any). Remember that this was allocated by XDR, not * GSS-API. 
*/ if (recv_tokenp != GSS_C_NO_BUFFER) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_token); recv_tokenp = GSS_C_NO_BUFFER; } if (gd->gd_mech && rpc_gss_oid_to_mech(gd->gd_mech, &mech)) { strlcpy(options_ret->actual_mechanism, mech, sizeof(options_ret->actual_mechanism)); } if (maj_stat != GSS_S_COMPLETE && maj_stat != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("gss_init_sec_context", gd->gd_mech, maj_stat, min_stat); options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; break; } if (send_token.length != 0) { memset(&gr, 0, sizeof(gr)); bzero(&ext, sizeof(ext)); ext.rc_auth = auth; call_stat = CLNT_CALL_EXT(gd->gd_clnt, &ext, NULLPROC, (xdrproc_t)xdr_gss_buffer_desc, &send_token, (xdrproc_t)xdr_rpc_gss_init_res, (caddr_t)&gr, AUTH_TIMEOUT); gss_release_buffer(&min_stat, &send_token); if (call_stat != RPC_SUCCESS) break; if (gr.gr_major != GSS_S_COMPLETE && gr.gr_major != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("server reply", gd->gd_mech, gr.gr_major, gr.gr_minor); options_ret->major_status = gr.gr_major; options_ret->minor_status = gr.gr_minor; break; } /* * Save the server's gr_handle value, freeing * what we have already (remember that this * was allocated by XDR, not GSS-API). */ if (gr.gr_handle.length != 0) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_cred.gc_handle); gd->gd_cred.gc_handle = gr.gr_handle; } /* * Save the server's token as well. */ if (gr.gr_token.length != 0) { recv_token = gr.gr_token; recv_tokenp = &recv_token; } /* * Since we have copied out all the bits of gr * which XDR allocated for us, we don't need * to free it. */ gd->gd_cred.gc_proc = RPCSEC_GSS_CONTINUE_INIT; } if (maj_stat == GSS_S_COMPLETE) { gss_buffer_desc bufin; u_int seq, qop_state = 0; /* * gss header verifier, * usually checked in gss_validate */ seq = htonl(gr.gr_win); bufin.value = (unsigned char *)&seq; bufin.length = sizeof(seq); maj_stat = gss_verify_mic(&min_stat, gd->gd_ctx, &bufin, &gd->gd_verf, &qop_state); if (maj_stat != GSS_S_COMPLETE || qop_state != gd->gd_qop) { rpc_gss_log_status("gss_verify_mic", gd->gd_mech, maj_stat, min_stat); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); } _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; break; } options_ret->major_status = GSS_S_COMPLETE; options_ret->minor_status = 0; options_ret->rpcsec_version = gd->gd_cred.gc_version; options_ret->gss_context = gd->gd_ctx; gd->gd_cred.gc_proc = RPCSEC_GSS_DATA; gd->gd_seq = 1; gd->gd_win = gr.gr_win; break; } } gss_release_name(&min_stat, &name); xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); out: /* End context negotiation loop. */ if (gd->gd_cred.gc_proc != RPCSEC_GSS_DATA) { rpc_createerr.cf_stat = RPC_AUTHERROR; _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); if (gd->gd_ctx) { gss_delete_sec_context(&min_stat, &gd->gd_ctx, GSS_C_NO_BUFFER); } KGSS_CURVNET_RESTORE(); mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_START; wakeup(gd); mtx_unlock(&gd->gd_lock); return (FALSE); } KGSS_CURVNET_RESTORE(); mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_ESTABLISHED; wakeup(gd); mtx_unlock(&gd->gd_lock); return (TRUE); } static bool_t rpc_gss_refresh(AUTH *auth, void *msg) { struct rpc_msg *reply = (struct rpc_msg *) msg; rpc_gss_options_ret_t options; struct rpc_gss_data *gd; gd = AUTH_PRIVATE(auth); /* * If the context is in DESTROYING state, then just return, since * there is no point in refreshing the credentials. 
*/ mtx_lock(&gd->gd_lock); if (gd->gd_state == RPCSEC_GSS_DESTROYING) { mtx_unlock(&gd->gd_lock); return (FALSE); } mtx_unlock(&gd->gd_lock); /* * If the error was RPCSEC_GSS_CREDPROBLEM of * RPCSEC_GSS_CTXPROBLEM we start again from scratch. All * other errors are fatal. */ if (reply->rm_reply.rp_stat == MSG_DENIED && reply->rm_reply.rp_rjct.rj_stat == AUTH_ERROR && (reply->rm_reply.rp_rjct.rj_why == RPCSEC_GSS_CREDPROBLEM || reply->rm_reply.rp_rjct.rj_why == RPCSEC_GSS_CTXPROBLEM)) { rpc_gss_destroy_context(auth, FALSE); memset(&options, 0, sizeof(options)); return (rpc_gss_init(auth, &options)); } return (FALSE); } static void rpc_gss_destroy_context(AUTH *auth, bool_t send_destroy) { struct rpc_gss_data *gd; struct rpc_pending_request *pr; OM_uint32 min_stat; struct rpc_callextra ext; rpc_gss_log_debug("in rpc_gss_destroy_context()"); gd = AUTH_PRIVATE(auth); mtx_lock(&gd->gd_lock); /* * If the context isn't in ESTABISHED state, someone else is * destroying/refreshing - we wait till they are done. */ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { while (gd->gd_state != RPCSEC_GSS_START && gd->gd_state != RPCSEC_GSS_ESTABLISHED) msleep(gd, &gd->gd_lock, 0, "gssstate", 0); mtx_unlock(&gd->gd_lock); return; } gd->gd_state = RPCSEC_GSS_DESTROYING; mtx_unlock(&gd->gd_lock); if (send_destroy) { gd->gd_cred.gc_proc = RPCSEC_GSS_DESTROY; bzero(&ext, sizeof(ext)); ext.rc_auth = auth; CLNT_CALL_EXT(gd->gd_clnt, &ext, NULLPROC, (xdrproc_t)xdr_void, NULL, (xdrproc_t)xdr_void, NULL, AUTH_TIMEOUT); } while ((pr = LIST_FIRST(&gd->gd_reqs)) != NULL) { LIST_REMOVE(pr, pr_link); mem_free(pr, sizeof(*pr)); } /* * Free the context token. Remember that this was * allocated by XDR, not GSS-API. */ xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_cred.gc_handle); gd->gd_cred.gc_handle.length = 0; if (gd->gd_ctx != GSS_C_NO_CONTEXT) gss_delete_sec_context(&min_stat, &gd->gd_ctx, NULL); mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_START; wakeup(gd); mtx_unlock(&gd->gd_lock); } static void rpc_gss_destroy(AUTH *auth) { struct rpc_gss_data *gd; rpc_gss_log_debug("in rpc_gss_destroy()"); gd = AUTH_PRIVATE(auth); if (!refcount_release(&gd->gd_refs)) return; rpc_gss_destroy_context(auth, TRUE); CLNT_RELEASE(gd->gd_clnt); crfree(gd->gd_ucred); free(gd->gd_principal, M_RPC); if (gd->gd_clntprincipal != NULL) free(gd->gd_clntprincipal, M_RPC); if (gd->gd_verf.value) xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); mtx_destroy(&gd->gd_lock); mem_free(gd, sizeof(*gd)); mem_free(auth, sizeof(*auth)); } int rpc_gss_max_data_length(AUTH *auth, int max_tp_unit_len) { struct rpc_gss_data *gd; int want_conf; OM_uint32 max; OM_uint32 maj_stat, min_stat; int result; gd = AUTH_PRIVATE(auth); switch (gd->gd_cred.gc_svc) { case rpc_gss_svc_none: return (max_tp_unit_len); break; case rpc_gss_svc_default: case rpc_gss_svc_integrity: want_conf = FALSE; break; case rpc_gss_svc_privacy: want_conf = TRUE; break; default: return (0); } maj_stat = gss_wrap_size_limit(&min_stat, gd->gd_ctx, want_conf, gd->gd_qop, max_tp_unit_len, &max); if (maj_stat == GSS_S_COMPLETE) { result = (int) max; if (result < 0) result = 0; return (result); } else { rpc_gss_log_status("gss_wrap_size_limit", gd->gd_mech, maj_stat, min_stat); return (0); } } diff --git a/sys/rpc/rpcsec_gss/rpcsec_gss_int.h b/sys/rpc/rpcsec_gss/rpcsec_gss_int.h index 3d643af8c498..02a7767220de 100644 --- a/sys/rpc/rpcsec_gss/rpcsec_gss_int.h +++ b/sys/rpc/rpcsec_gss/rpcsec_gss_int.h @@ -1,95 +1,101 @@ /* rpcsec_gss.h SPDX-License-Identifier: 
BSD-3-Clause Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. $Id: auth_gss.h,v 1.12 2001/04/30 19:44:47 andros Exp $ */ #ifndef _RPCSEC_GSS_INT_H #define _RPCSEC_GSS_INT_H #include /* RPCSEC_GSS control procedures. */ typedef enum { RPCSEC_GSS_DATA = 0, RPCSEC_GSS_INIT = 1, RPCSEC_GSS_CONTINUE_INIT = 2, RPCSEC_GSS_DESTROY = 3 } rpc_gss_proc_t; #define RPCSEC_GSS_VERSION 1 /* Credentials. */ struct rpc_gss_cred { u_int gc_version; /* version */ rpc_gss_proc_t gc_proc; /* control procedure */ u_int gc_seq; /* sequence number */ rpc_gss_service_t gc_svc; /* service */ gss_buffer_desc gc_handle; /* handle to server-side context */ }; /* Context creation response. */ struct rpc_gss_init_res { gss_buffer_desc gr_handle; /* handle to server-side context */ u_int gr_major; /* major status */ u_int gr_minor; /* minor status */ u_int gr_win; /* sequence window */ gss_buffer_desc gr_token; /* token */ }; /* Maximum sequence number value. */ #define MAXSEQ 0x80000000 +enum krb_imp { + KRBIMP_UNKNOWN, + KRBIMP_HESIOD1, + KRBIMP_MIT +}; + /* Prototypes. 
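The krb_imp values added to this header record which kind of userland gssd is in service. A minimal sketch of the probe-once idiom that the client and server paths of this change both apply is shown below; gss_supports_lucid() is the new query used elsewhere in this diff, while the helper name itself is an assumption for illustration.

	/*
	 * Sketch only: cache the result of gss_supports_lucid() in a
	 * static so the gssd daemon is probed at most once.
	 */
	static enum krb_imp
	kgss_probe_krb_imp(void)	/* hypothetical helper name */
	{
		static enum krb_imp imp = KRBIMP_UNKNOWN;
		OM_uint32 maj_stat, min_stat;

		if (imp == KRBIMP_UNKNOWN) {
			maj_stat = gss_supports_lucid(&min_stat, NULL);
			imp = (maj_stat == GSS_S_COMPLETE) ?
			    KRBIMP_MIT : KRBIMP_HESIOD1;
		}
		return (imp);
	}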
*/ __BEGIN_DECLS bool_t xdr_rpc_gss_cred(XDR *xdrs, struct rpc_gss_cred *p); bool_t xdr_rpc_gss_init_res(XDR *xdrs, struct rpc_gss_init_res *p); bool_t xdr_rpc_gss_wrap_data(struct mbuf **argsp, gss_ctx_id_t ctx, gss_qop_t qop, rpc_gss_service_t svc, u_int seq); bool_t xdr_rpc_gss_unwrap_data(struct mbuf **resultsp, gss_ctx_id_t ctx, gss_qop_t qop, rpc_gss_service_t svc, u_int seq); const char *_rpc_gss_num_to_qop(const char *mech, u_int num); void _rpc_gss_set_error(int rpc_gss_error, int system_error); void rpc_gss_log_debug(const char *fmt, ...); void rpc_gss_log_status(const char *m, gss_OID mech, OM_uint32 major, OM_uint32 minor); __END_DECLS #endif /* !_RPCSEC_GSS_INT_H */ diff --git a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c index 51077c71822c..e047c557c712 100644 --- a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c +++ b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c @@ -1,1651 +1,1715 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 1990 The Regents of the University of California. * * Copyright (c) 2008 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* svc_rpcsec_gss.c Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. $Id: svc_auth_gss.c,v 1.27 2002/01/15 15:43:00 andros Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rpcsec_gss_int.h" static bool_t svc_rpc_gss_wrap(SVCAUTH *, struct mbuf **); static bool_t svc_rpc_gss_unwrap(SVCAUTH *, struct mbuf **); static void svc_rpc_gss_release(SVCAUTH *); static enum auth_stat svc_rpc_gss(struct svc_req *, struct rpc_msg *); static int rpc_gss_svc_getcred(struct svc_req *, struct ucred **, int *); static const struct svc_auth_ops svc_auth_gss_ops = { .svc_ah_wrap = svc_rpc_gss_wrap, .svc_ah_unwrap = svc_rpc_gss_unwrap, .svc_ah_release = svc_rpc_gss_release, }; struct sx svc_rpc_gss_lock; struct svc_rpc_gss_callback { SLIST_ENTRY(svc_rpc_gss_callback) cb_link; rpc_gss_callback_t cb_callback; }; SLIST_HEAD(svc_rpc_gss_callback_list, svc_rpc_gss_callback); KGSS_VNET_DEFINE_STATIC(struct svc_rpc_gss_callback_list, svc_rpc_gss_callbacks) = SLIST_HEAD_INITIALIZER(svc_rpc_gss_callbacks); struct svc_rpc_gss_svc_name { SLIST_ENTRY(svc_rpc_gss_svc_name) sn_link; char *sn_principal; gss_OID sn_mech; u_int sn_req_time; gss_cred_id_t sn_cred; u_int sn_program; u_int sn_version; }; SLIST_HEAD(svc_rpc_gss_svc_name_list, svc_rpc_gss_svc_name); KGSS_VNET_DEFINE_STATIC(struct svc_rpc_gss_svc_name_list, svc_rpc_gss_svc_names) = SLIST_HEAD_INITIALIZER(svc_rpc_gss_svc_names); enum svc_rpc_gss_client_state { CLIENT_NEW, /* still authenticating */ CLIENT_ESTABLISHED, /* context established */ CLIENT_STALE /* garbage to collect */ }; #define SVC_RPC_GSS_SEQWINDOW 128 struct svc_rpc_gss_clientid { unsigned long ci_hostid; uint32_t ci_boottime; uint32_t ci_id; }; struct svc_rpc_gss_client { TAILQ_ENTRY(svc_rpc_gss_client) cl_link; TAILQ_ENTRY(svc_rpc_gss_client) cl_alllink; volatile u_int cl_refs; struct sx cl_lock; struct svc_rpc_gss_clientid cl_id; time_t cl_expiration; /* when to gc */ enum svc_rpc_gss_client_state cl_state; /* client state */ bool_t cl_locked; /* fixed service+qop */ gss_ctx_id_t cl_ctx; /* context id */ gss_cred_id_t cl_creds; /* delegated creds */ gss_name_t cl_cname; /* client name */ struct svc_rpc_gss_svc_name *cl_sname; /* server name used */ rpc_gss_rawcred_t cl_rawcred; /* raw credentials */ rpc_gss_ucred_t cl_ucred; /* unix-style credentials */ struct ucred *cl_cred; /* kernel-style credentials */ int cl_rpcflavor; /* RPC pseudo sec flavor */ bool_t cl_done_callback; /* TRUE after call */ void *cl_cookie; /* user cookie from callback */ gid_t cl_gid_storage[NGROUPS]; gss_OID cl_mech; /* mechanism */ gss_qop_t cl_qop; /* quality of protection */ uint32_t cl_seqlast; /* sequence window origin */ uint32_t cl_seqmask[SVC_RPC_GSS_SEQWINDOW/32]; /* bitmask of seqnums */ }; TAILQ_HEAD(svc_rpc_gss_client_list, svc_rpc_gss_client); /* * This structure holds enough information to unwrap arguments or wrap * results for a given request. We use the rq_clntcred area for this * (which is a per-request buffer). 
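The context handle that the server hands back to clients is simply the three-word svc_rpc_gss_clientid defined above; the host id and boot time let the server refuse handles minted by another host or before the last reboot. The check below is only an illustrative extraction of the test performed in svc_rpc_gss_find_client() further down.

	/*
	 * Illustrative only: a handle from another host or from before
	 * the last reboot can never match, so the lookup fails and the
	 * client is told to re-authenticate (RPCSEC_GSS_CREDPROBLEM).
	 */
	static bool_t
	clientid_is_current(const struct svc_rpc_gss_clientid *id)
	{
		struct timeval boottime;
		unsigned long hostid;

		getcredhostid(curthread->td_ucred, &hostid);
		getboottime(&boottime);
		return (id->ci_hostid == hostid &&
		    id->ci_boottime == boottime.tv_sec);
	}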
*/ struct svc_rpc_gss_cookedcred { struct svc_rpc_gss_client *cc_client; rpc_gss_service_t cc_service; uint32_t cc_seq; }; #define CLIENT_HASH_SIZE 256 #define CLIENT_MAX 1024 u_int svc_rpc_gss_client_max = CLIENT_MAX; u_int svc_rpc_gss_client_hash_size = CLIENT_HASH_SIZE; SYSCTL_DECL(_kern_rpc); SYSCTL_NODE(_kern_rpc, OID_AUTO, gss, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GSS"); SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_max, CTLFLAG_RW, &svc_rpc_gss_client_max, 0, "Max number of rpc-gss clients"); SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_hash, CTLFLAG_RDTUN, &svc_rpc_gss_client_hash_size, 0, "Size of rpc-gss client hash table"); static u_int svc_rpc_gss_lifetime_max = 0; SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, lifetime_max, CTLFLAG_RW, &svc_rpc_gss_lifetime_max, 0, "Maximum lifetime (seconds) of rpc-gss clients"); static u_int svc_rpc_gss_client_count; SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_count, CTLFLAG_RD, &svc_rpc_gss_client_count, 0, "Number of rpc-gss clients"); KGSS_VNET_DEFINE(struct svc_rpc_gss_client_list *, svc_rpc_gss_client_hash); KGSS_VNET_DEFINE(struct svc_rpc_gss_client_list, svc_rpc_gss_clients); KGSS_VNET_DEFINE_STATIC(uint32_t, svc_rpc_gss_next_clientid) = 1; static void svc_rpc_gss_init(void *unused __unused) { svc_auth_reg(RPCSEC_GSS, svc_rpc_gss, rpc_gss_svc_getcred); sx_init(&svc_rpc_gss_lock, "gsslock"); } SYSINIT(svc_rpc_gss_init, SI_SUB_VFS, SI_ORDER_ANY, svc_rpc_gss_init, NULL); static void svc_rpc_gss_cleanup(void *unused __unused) { sx_destroy(&svc_rpc_gss_lock); } SYSUNINIT(svc_rpc_gss_cleanup, SI_SUB_VFS, SI_ORDER_ANY, svc_rpc_gss_cleanup, NULL); static void svc_rpc_gss_vnetinit(void *unused __unused) { int i; KGSS_VNET(svc_rpc_gss_client_hash) = mem_alloc( sizeof(struct svc_rpc_gss_client_list) * svc_rpc_gss_client_hash_size); for (i = 0; i < svc_rpc_gss_client_hash_size; i++) TAILQ_INIT(&KGSS_VNET(svc_rpc_gss_client_hash)[i]); TAILQ_INIT(&KGSS_VNET(svc_rpc_gss_clients)); } VNET_SYSINIT(svc_rpc_gss_vnetinit, SI_SUB_VNET_DONE, SI_ORDER_ANY, svc_rpc_gss_vnetinit, NULL); static void svc_rpc_gss_vnet_cleanup(void *unused __unused) { mem_free(KGSS_VNET(svc_rpc_gss_client_hash), sizeof(struct svc_rpc_gss_client_list) * svc_rpc_gss_client_hash_size); } VNET_SYSUNINIT(svc_rpc_gss_vnet_cleanup, SI_SUB_VNET_DONE, SI_ORDER_ANY, svc_rpc_gss_vnet_cleanup, NULL); bool_t rpc_gss_set_callback(rpc_gss_callback_t *cb) { struct svc_rpc_gss_callback *scb; scb = mem_alloc(sizeof(struct svc_rpc_gss_callback)); if (!scb) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } scb->cb_callback = *cb; sx_xlock(&svc_rpc_gss_lock); SLIST_INSERT_HEAD(&KGSS_VNET(svc_rpc_gss_callbacks), scb, cb_link); sx_xunlock(&svc_rpc_gss_lock); return (TRUE); } void rpc_gss_clear_callback(rpc_gss_callback_t *cb) { struct svc_rpc_gss_callback *scb; sx_xlock(&svc_rpc_gss_lock); SLIST_FOREACH(scb, &KGSS_VNET(svc_rpc_gss_callbacks), cb_link) { if (scb->cb_callback.program == cb->program && scb->cb_callback.version == cb->version && scb->cb_callback.callback == cb->callback) { SLIST_REMOVE(&KGSS_VNET(svc_rpc_gss_callbacks), scb, svc_rpc_gss_callback, cb_link); sx_xunlock(&svc_rpc_gss_lock); mem_free(scb, sizeof(*scb)); return; } } sx_xunlock(&svc_rpc_gss_lock); } static bool_t rpc_gss_acquire_svc_cred(struct svc_rpc_gss_svc_name *sname) { OM_uint32 maj_stat, min_stat; gss_buffer_desc namebuf; gss_name_t name; gss_OID_set_desc oid_set; oid_set.count = 1; oid_set.elements = sname->sn_mech; namebuf.value = (void *) sname->sn_principal; namebuf.length = strlen(sname->sn_principal); maj_stat = 
gss_import_name(&min_stat, &namebuf, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) return (FALSE); if (sname->sn_cred != GSS_C_NO_CREDENTIAL) gss_release_cred(&min_stat, &sname->sn_cred); maj_stat = gss_acquire_cred(&min_stat, name, sname->sn_req_time, &oid_set, GSS_C_ACCEPT, &sname->sn_cred, NULL, NULL); if (maj_stat != GSS_S_COMPLETE) { gss_release_name(&min_stat, &name); return (FALSE); } gss_release_name(&min_stat, &name); return (TRUE); } bool_t rpc_gss_set_svc_name(const char *principal, const char *mechanism, u_int req_time, u_int program, u_int version) { struct svc_rpc_gss_svc_name *sname; gss_OID mech_oid; if (!rpc_gss_mech_to_oid(mechanism, &mech_oid)) return (FALSE); sname = mem_alloc(sizeof(*sname)); if (!sname) return (FALSE); sname->sn_principal = strdup(principal, M_RPC); sname->sn_mech = mech_oid; sname->sn_req_time = req_time; sname->sn_cred = GSS_C_NO_CREDENTIAL; sname->sn_program = program; sname->sn_version = version; if (!rpc_gss_acquire_svc_cred(sname)) { free(sname->sn_principal, M_RPC); mem_free(sname, sizeof(*sname)); return (FALSE); } sx_xlock(&svc_rpc_gss_lock); SLIST_INSERT_HEAD(&KGSS_VNET(svc_rpc_gss_svc_names), sname, sn_link); sx_xunlock(&svc_rpc_gss_lock); return (TRUE); } void rpc_gss_clear_svc_name(u_int program, u_int version) { OM_uint32 min_stat; struct svc_rpc_gss_svc_name *sname; sx_xlock(&svc_rpc_gss_lock); SLIST_FOREACH(sname, &KGSS_VNET(svc_rpc_gss_svc_names), sn_link) { if (sname->sn_program == program && sname->sn_version == version) { SLIST_REMOVE(&KGSS_VNET(svc_rpc_gss_svc_names), sname, svc_rpc_gss_svc_name, sn_link); sx_xunlock(&svc_rpc_gss_lock); gss_release_cred(&min_stat, &sname->sn_cred); free(sname->sn_principal, M_RPC); mem_free(sname, sizeof(*sname)); return; } } sx_xunlock(&svc_rpc_gss_lock); } bool_t rpc_gss_get_principal_name(rpc_gss_principal_t *principal, const char *mech, const char *name, const char *node, const char *domain) { OM_uint32 maj_stat, min_stat; gss_OID mech_oid; size_t namelen; gss_buffer_desc buf; gss_name_t gss_name, gss_mech_name; rpc_gss_principal_t result; if (!rpc_gss_mech_to_oid(mech, &mech_oid)) return (FALSE); /* * Construct a gss_buffer containing the full name formatted * as "name/node@domain" where node and domain are optional. */ namelen = strlen(name) + 1; if (node) { namelen += strlen(node) + 1; } if (domain) { namelen += strlen(domain) + 1; } buf.value = mem_alloc(namelen); buf.length = namelen; strcpy((char *) buf.value, name); if (node) { strcat((char *) buf.value, "/"); strcat((char *) buf.value, node); } if (domain) { strcat((char *) buf.value, "@"); strcat((char *) buf.value, domain); } /* * Convert that to a gss_name_t and then convert that to a * mechanism name in the selected mechanism. */ maj_stat = gss_import_name(&min_stat, &buf, GSS_C_NT_USER_NAME, &gss_name); mem_free(buf.value, buf.length); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_import_name", mech_oid, maj_stat, min_stat); return (FALSE); } maj_stat = gss_canonicalize_name(&min_stat, gss_name, mech_oid, &gss_mech_name); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_canonicalize_name", mech_oid, maj_stat, min_stat); gss_release_name(&min_stat, &gss_name); return (FALSE); } gss_release_name(&min_stat, &gss_name); /* * Export the mechanism name and use that to construct the * rpc_gss_principal_t result. 
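rpc_gss_get_principal_name() above is the interface a service can use to pre-compute an exported mechanism name for later comparison against cl_rawcred.client_principal. A hedged usage sketch follows; the principal parts and the helper name are assumptions, not taken from this change.

	/*
	 * Hypothetical helper: build the exported form of
	 * "root/client.example.com@EXAMPLE.COM" under the Kerberos V
	 * mechanism.  The result is mem_alloc()'d and must eventually
	 * be freed with mem_free(principal, sizeof(int) + principal->len).
	 */
	static rpc_gss_principal_t
	example_make_principal(void)
	{
		rpc_gss_principal_t principal;

		if (!rpc_gss_get_principal_name(&principal, "kerberosv5",
		    "root", "client.example.com", "EXAMPLE.COM"))
			return (NULL);
		return (principal);
	}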
*/ maj_stat = gss_export_name(&min_stat, gss_mech_name, &buf); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_export_name", mech_oid, maj_stat, min_stat); gss_release_name(&min_stat, &gss_mech_name); return (FALSE); } gss_release_name(&min_stat, &gss_mech_name); result = mem_alloc(sizeof(int) + buf.length); if (!result) { gss_release_buffer(&min_stat, &buf); return (FALSE); } result->len = buf.length; memcpy(result->name, buf.value, buf.length); gss_release_buffer(&min_stat, &buf); *principal = result; return (TRUE); } /* * Note that the ip_addr and srv_principal pointers can point to the same * buffer, so long as ip_addr is at least strlen(srv_name) + 1 > srv_principal. */ bool_t rpc_gss_ip_to_srv_principal(char *ip_addr, const char *srv_name, char *srv_principal) { OM_uint32 maj_stat, min_stat; size_t len; /* * First fill in the service name and '@'. */ len = strlen(srv_name); if (len > NI_MAXSERV) return (FALSE); memcpy(srv_principal, srv_name, len); srv_principal[len] = '@'; /* * Do reverse DNS to get the DNS name for the ip_addr. */ maj_stat = gss_ip_to_dns(&min_stat, ip_addr, &srv_principal[len + 1]); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_ip_to_dns", NULL, maj_stat, min_stat); return (FALSE); } return (TRUE); } bool_t rpc_gss_getcred(struct svc_req *req, rpc_gss_rawcred_t **rcred, rpc_gss_ucred_t **ucred, void **cookie) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; if (req->rq_cred.oa_flavor != RPCSEC_GSS) return (FALSE); cc = req->rq_clntcred; client = cc->cc_client; if (rcred) *rcred = &client->cl_rawcred; if (ucred) *ucred = &client->cl_ucred; if (cookie) *cookie = client->cl_cookie; return (TRUE); } /* * This simpler interface is used by svc_getcred to copy the cred data * into a kernel cred structure. 
*/ static int rpc_gss_svc_getcred(struct svc_req *req, struct ucred **crp, int *flavorp) { struct ucred *cr; struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_ucred_t *uc; if (req->rq_cred.oa_flavor != RPCSEC_GSS) return (FALSE); cc = req->rq_clntcred; client = cc->cc_client; if (flavorp) *flavorp = client->cl_rpcflavor; if (client->cl_cred) { *crp = crhold(client->cl_cred); return (TRUE); } uc = &client->cl_ucred; cr = client->cl_cred = crget(); cr->cr_uid = cr->cr_ruid = cr->cr_svuid = uc->uid; cr->cr_rgid = cr->cr_svgid = uc->gid; crsetgroups_and_egid(cr, uc->gidlen, uc->gidlist, uc->gid); cr->cr_prison = curthread->td_ucred->cr_prison; prison_hold(cr->cr_prison); *crp = crhold(cr); return (TRUE); } int rpc_gss_svc_max_data_length(struct svc_req *req, int max_tp_unit_len) { struct svc_rpc_gss_cookedcred *cc = req->rq_clntcred; struct svc_rpc_gss_client *client = cc->cc_client; int want_conf; OM_uint32 max; OM_uint32 maj_stat, min_stat; int result; switch (client->cl_rawcred.service) { case rpc_gss_svc_none: return (max_tp_unit_len); break; case rpc_gss_svc_default: case rpc_gss_svc_integrity: want_conf = FALSE; break; case rpc_gss_svc_privacy: want_conf = TRUE; break; default: return (0); } maj_stat = gss_wrap_size_limit(&min_stat, client->cl_ctx, want_conf, client->cl_qop, max_tp_unit_len, &max); if (maj_stat == GSS_S_COMPLETE) { result = (int) max; if (result < 0) result = 0; return (result); } else { rpc_gss_log_status("gss_wrap_size_limit", client->cl_mech, maj_stat, min_stat); return (0); } } static struct svc_rpc_gss_client * svc_rpc_gss_find_client(struct svc_rpc_gss_clientid *id) { struct svc_rpc_gss_client *client; struct svc_rpc_gss_client_list *list; struct timeval boottime; unsigned long hostid; rpc_gss_log_debug("in svc_rpc_gss_find_client(%d)", id->ci_id); getcredhostid(curthread->td_ucred, &hostid); getboottime(&boottime); if (id->ci_hostid != hostid || id->ci_boottime != boottime.tv_sec) return (NULL); list = &KGSS_VNET(svc_rpc_gss_client_hash) [id->ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_FOREACH(client, list, cl_link) { if (client->cl_id.ci_id == id->ci_id) { /* * Move this client to the front of the LRU * list. */ TAILQ_REMOVE(&KGSS_VNET(svc_rpc_gss_clients), client, cl_alllink); TAILQ_INSERT_HEAD(&KGSS_VNET(svc_rpc_gss_clients), client, cl_alllink); refcount_acquire(&client->cl_refs); break; } } sx_xunlock(&svc_rpc_gss_lock); return (client); } static struct svc_rpc_gss_client * svc_rpc_gss_create_client(void) { struct svc_rpc_gss_client *client; struct svc_rpc_gss_client_list *list; struct timeval boottime; unsigned long hostid; rpc_gss_log_debug("in svc_rpc_gss_create_client()"); client = mem_alloc(sizeof(struct svc_rpc_gss_client)); memset(client, 0, sizeof(struct svc_rpc_gss_client)); /* * Set the initial value of cl_refs to two. One for the caller * and the other to hold onto the client structure until it expires. */ refcount_init(&client->cl_refs, 2); sx_init(&client->cl_lock, "GSS-client"); getcredhostid(curthread->td_ucred, &hostid); client->cl_id.ci_hostid = hostid; getboottime(&boottime); client->cl_id.ci_boottime = boottime.tv_sec; client->cl_id.ci_id = KGSS_VNET(svc_rpc_gss_next_clientid)++; /* * Start the client off with a short expiration time. We will * try to get a saner value from the client creds later. 
*/ client->cl_state = CLIENT_NEW; client->cl_locked = FALSE; client->cl_expiration = time_uptime + 5*60; list = &KGSS_VNET(svc_rpc_gss_client_hash) [client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_INSERT_HEAD(list, client, cl_link); TAILQ_INSERT_HEAD(&KGSS_VNET(svc_rpc_gss_clients), client, cl_alllink); svc_rpc_gss_client_count++; sx_xunlock(&svc_rpc_gss_lock); return (client); } static void svc_rpc_gss_destroy_client(struct svc_rpc_gss_client *client) { OM_uint32 min_stat; rpc_gss_log_debug("in svc_rpc_gss_destroy_client()"); if (client->cl_ctx) gss_delete_sec_context(&min_stat, &client->cl_ctx, GSS_C_NO_BUFFER); if (client->cl_cname) gss_release_name(&min_stat, &client->cl_cname); if (client->cl_rawcred.client_principal) mem_free(client->cl_rawcred.client_principal, sizeof(*client->cl_rawcred.client_principal) + client->cl_rawcred.client_principal->len); if (client->cl_cred) crfree(client->cl_cred); sx_destroy(&client->cl_lock); mem_free(client, sizeof(*client)); } /* * Drop a reference to a client and free it if that was the last reference. */ static void svc_rpc_gss_release_client(struct svc_rpc_gss_client *client) { if (!refcount_release(&client->cl_refs)) return; svc_rpc_gss_destroy_client(client); } /* * Remove a client from our global lists. * Must be called with svc_rpc_gss_lock held. */ static void svc_rpc_gss_forget_client_locked(struct svc_rpc_gss_client *client) { struct svc_rpc_gss_client_list *list; sx_assert(&svc_rpc_gss_lock, SX_XLOCKED); list = &KGSS_VNET(svc_rpc_gss_client_hash) [client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; TAILQ_REMOVE(list, client, cl_link); TAILQ_REMOVE(&KGSS_VNET(svc_rpc_gss_clients), client, cl_alllink); svc_rpc_gss_client_count--; } /* * Remove a client from our global lists and free it if we can. */ static void svc_rpc_gss_forget_client(struct svc_rpc_gss_client *client) { struct svc_rpc_gss_client_list *list; struct svc_rpc_gss_client *tclient; list = &KGSS_VNET(svc_rpc_gss_client_hash) [client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_FOREACH(tclient, list, cl_link) { /* * Make sure this client has not already been removed * from the lists by svc_rpc_gss_forget_client() or * svc_rpc_gss_forget_client_locked(). */ if (client == tclient) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); svc_rpc_gss_release_client(client); return; } } sx_xunlock(&svc_rpc_gss_lock); } static void svc_rpc_gss_timeout_clients(void) { struct svc_rpc_gss_client *client; time_t now = time_uptime; rpc_gss_log_debug("in svc_rpc_gss_timeout_clients()"); /* * First enforce the max client limit. We keep * svc_rpc_gss_clients in LRU order. */ sx_xlock(&svc_rpc_gss_lock); client = TAILQ_LAST(&KGSS_VNET(svc_rpc_gss_clients), svc_rpc_gss_client_list); while (svc_rpc_gss_client_count > svc_rpc_gss_client_max && client != NULL) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); svc_rpc_gss_release_client(client); sx_xlock(&svc_rpc_gss_lock); client = TAILQ_LAST(&KGSS_VNET(svc_rpc_gss_clients), svc_rpc_gss_client_list); } again: TAILQ_FOREACH(client, &KGSS_VNET(svc_rpc_gss_clients), cl_alllink) { if (client->cl_state == CLIENT_STALE || now > client->cl_expiration) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); rpc_gss_log_debug("expiring client %p", client); svc_rpc_gss_release_client(client); sx_xlock(&svc_rpc_gss_lock); goto again; } } sx_xunlock(&svc_rpc_gss_lock); } #ifdef DEBUG /* * OID<->string routines. 
These are uuuuugly. */ static OM_uint32 gss_oid_to_str(OM_uint32 *minor_status, gss_OID oid, gss_buffer_t oid_str) { char numstr[128]; unsigned long number; int numshift; size_t string_length; size_t i; unsigned char *cp; char *bp; /* Decoded according to krb5/gssapi_krb5.c */ /* First determine the size of the string */ string_length = 0; number = 0; numshift = 0; cp = (unsigned char *) oid->elements; number = (unsigned long) cp[0]; sprintf(numstr, "%ld ", number/40); string_length += strlen(numstr); sprintf(numstr, "%ld ", number%40); string_length += strlen(numstr); for (i=1; i<oid->length; i++) { if ( (size_t) (numshift+7) < (sizeof(unsigned long)*8)) { number = (number << 7) | (cp[i] & 0x7f); numshift += 7; } else { *minor_status = 0; return(GSS_S_FAILURE); } if ((cp[i] & 0x80) == 0) { sprintf(numstr, "%ld ", number); string_length += strlen(numstr); number = 0; numshift = 0; } } /* * If we get here, we've calculated the length of "n n n ... n ". Add 4 * here for "{ " and "}\0". */ string_length += 4; if ((bp = malloc(string_length, M_GSSAPI, M_WAITOK | M_ZERO))) { strcpy(bp, "{ "); number = (unsigned long) cp[0]; sprintf(numstr, "%ld ", number/40); strcat(bp, numstr); sprintf(numstr, "%ld ", number%40); strcat(bp, numstr); number = 0; cp = (unsigned char *) oid->elements; for (i=1; i<oid->length; i++) { number = (number << 7) | (cp[i] & 0x7f); if ((cp[i] & 0x80) == 0) { sprintf(numstr, "%ld ", number); strcat(bp, numstr); number = 0; } } strcat(bp, "}"); oid_str->length = strlen(bp)+1; oid_str->value = (void *) bp; *minor_status = 0; return(GSS_S_COMPLETE); } *minor_status = 0; return(GSS_S_FAILURE); } #endif static void svc_rpc_gss_build_ucred(struct svc_rpc_gss_client *client, const gss_name_t name) { OM_uint32 maj_stat, min_stat; rpc_gss_ucred_t *uc = &client->cl_ucred; int numgroups; uc->uid = 65534; uc->gid = 65534; uc->gidlist = client->cl_gid_storage; numgroups = NGROUPS; maj_stat = gss_pname_to_unix_cred(&min_stat, name, client->cl_mech, &uc->uid, &uc->gid, &numgroups, &uc->gidlist[0]); if (GSS_ERROR(maj_stat)) uc->gidlen = 0; else uc->gidlen = numgroups; } static void svc_rpc_gss_set_flavor(struct svc_rpc_gss_client *client) { static gss_OID_desc krb5_mech_oid = {9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" }; /* * Attempt to translate mech type and service into a * 'pseudo flavor'. Hardwire in krb5 support for now. 
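As a concrete illustration of the base-128 walk performed by gss_oid_to_str() above, the nine-byte Kerberos V mechanism OID declared just above decodes as follows (worked example added for clarity, not part of the change).

	/*
	 * Worked example for the krb5 OID 1.2.840.113554.1.2.2:
	 *
	 *   bytes: 2a 86 48 86 f7 12 01 02 02
	 *
	 *   0x2a = 42        -> 42/40 = 1, 42%40 = 2          => "1 2"
	 *   0x86 0x48        -> (6 << 7) | 0x48 = 840         => "840"
	 *   0x86 0xf7 0x12   -> (((6 << 7) | 0x77) << 7) | 0x12
	 *                       = 113554                      => "113554"
	 *   0x01 0x02 0x02   -> 1, 2, 2
	 *
	 * A set high bit means "more septets follow"; gss_oid_to_str()
	 * renders the result as "{ 1 2 840 113554 1 2 2 }".
	 */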
*/ if (kgss_oid_equal(client->cl_mech, &krb5_mech_oid)) { switch (client->cl_rawcred.service) { case rpc_gss_svc_default: case rpc_gss_svc_none: client->cl_rpcflavor = RPCSEC_GSS_KRB5; break; case rpc_gss_svc_integrity: client->cl_rpcflavor = RPCSEC_GSS_KRB5I; break; case rpc_gss_svc_privacy: client->cl_rpcflavor = RPCSEC_GSS_KRB5P; break; } } else { client->cl_rpcflavor = RPCSEC_GSS; } } static bool_t svc_rpc_gss_accept_sec_context(struct svc_rpc_gss_client *client, struct svc_req *rqst, struct rpc_gss_init_res *gr, struct rpc_gss_cred *gc) { gss_buffer_desc recv_tok; gss_OID mech; OM_uint32 maj_stat = 0, min_stat = 0, ret_flags; OM_uint32 cred_lifetime; struct svc_rpc_gss_svc_name *sname; + gss_buffer_desc export_name; + rpc_gss_ucred_t *uc = &client->cl_ucred; + int numgroups; + static enum krb_imp my_krb_imp = KRBIMP_UNKNOWN; rpc_gss_log_debug("in svc_rpc_gss_accept_context()"); + if (my_krb_imp == KRBIMP_UNKNOWN) { + maj_stat = gss_supports_lucid(&min_stat, NULL); + if (maj_stat == GSS_S_COMPLETE) + my_krb_imp = KRBIMP_MIT; + else + my_krb_imp = KRBIMP_HESIOD1; + min_stat = 0; + } + + if (my_krb_imp == KRBIMP_MIT) { + uc->uid = 65534; + uc->gid = 65534; + uc->gidlist = client->cl_gid_storage; + numgroups = NGROUPS; + } + /* Deserialize arguments. */ memset(&recv_tok, 0, sizeof(recv_tok)); if (!svc_getargs(rqst, (xdrproc_t) xdr_gss_buffer_desc, (caddr_t) &recv_tok)) { client->cl_state = CLIENT_STALE; return (FALSE); } /* * First time round, try all the server names we have until * one matches. Afterwards, stick with that one. */ sx_xlock(&svc_rpc_gss_lock); if (!client->cl_sname) { SLIST_FOREACH(sname, &KGSS_VNET(svc_rpc_gss_svc_names), sn_link) { if (sname->sn_program == rqst->rq_prog && sname->sn_version == rqst->rq_vers) { retry: - gr->gr_major = gss_accept_sec_context( - &gr->gr_minor, - &client->cl_ctx, - sname->sn_cred, - &recv_tok, - GSS_C_NO_CHANNEL_BINDINGS, - &client->cl_cname, - &mech, - &gr->gr_token, - &ret_flags, - &cred_lifetime, - &client->cl_creds); + if (my_krb_imp == KRBIMP_MIT) + gr->gr_major = + gss_accept_sec_context_lucid_v1( + &gr->gr_minor, + &client->cl_ctx, + sname->sn_cred, + &recv_tok, + GSS_C_NO_CHANNEL_BINDINGS, + &client->cl_cname, + &mech, + &gr->gr_token, + &ret_flags, + &cred_lifetime, + &client->cl_creds, + &export_name, + &uc->uid, + &uc->gid, + &numgroups, + &uc->gidlist[0]); + else + gr->gr_major = gss_accept_sec_context( + &gr->gr_minor, + &client->cl_ctx, + sname->sn_cred, + &recv_tok, + GSS_C_NO_CHANNEL_BINDINGS, + &client->cl_cname, + &mech, + &gr->gr_token, + &ret_flags, + &cred_lifetime, + &client->cl_creds); if (gr->gr_major == GSS_S_CREDENTIALS_EXPIRED) { /* * Either our creds really did * expire or gssd was * restarted. 
*/ if (rpc_gss_acquire_svc_cred(sname)) goto retry; } client->cl_sname = sname; break; } } if (!sname) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_tok); sx_xunlock(&svc_rpc_gss_lock); return (FALSE); } } else { - gr->gr_major = gss_accept_sec_context( - &gr->gr_minor, - &client->cl_ctx, - client->cl_sname->sn_cred, - &recv_tok, - GSS_C_NO_CHANNEL_BINDINGS, - &client->cl_cname, - &mech, - &gr->gr_token, - &ret_flags, - &cred_lifetime, - NULL); + if (my_krb_imp == KRBIMP_MIT) + gr->gr_major = gss_accept_sec_context_lucid_v1( + &gr->gr_minor, + &client->cl_ctx, + client->cl_sname->sn_cred, + &recv_tok, + GSS_C_NO_CHANNEL_BINDINGS, + &client->cl_cname, + &mech, + &gr->gr_token, + &ret_flags, + &cred_lifetime, + NULL, + &export_name, + &uc->uid, + &uc->gid, + &numgroups, + &uc->gidlist[0]); + else + gr->gr_major = gss_accept_sec_context( + &gr->gr_minor, + &client->cl_ctx, + client->cl_sname->sn_cred, + &recv_tok, + GSS_C_NO_CHANNEL_BINDINGS, + &client->cl_cname, + &mech, + &gr->gr_token, + &ret_flags, + &cred_lifetime, + NULL); } sx_xunlock(&svc_rpc_gss_lock); xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_tok); /* * If we get an error from gss_accept_sec_context, send the * reply anyway so that the client gets a chance to see what * is wrong. */ if (gr->gr_major != GSS_S_COMPLETE && gr->gr_major != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("accept_sec_context", client->cl_mech, gr->gr_major, gr->gr_minor); client->cl_state = CLIENT_STALE; + if (my_krb_imp == KRBIMP_MIT) + uc->gidlen = 0; return (TRUE); } + if (my_krb_imp == KRBIMP_MIT) + uc->gidlen = numgroups; gr->gr_handle.value = &client->cl_id; gr->gr_handle.length = sizeof(client->cl_id); gr->gr_win = SVC_RPC_GSS_SEQWINDOW; /* Save client info. */ client->cl_mech = mech; client->cl_qop = GSS_C_QOP_DEFAULT; client->cl_done_callback = FALSE; if (gr->gr_major == GSS_S_COMPLETE) { - gss_buffer_desc export_name; - /* * Change client expiration time to be near when the * client creds expire (or 24 hours if we can't figure * that out). */ if (cred_lifetime == GSS_C_INDEFINITE) cred_lifetime = 24*60*60; /* * Cap cred_lifetime if sysctl kern.rpc.gss.lifetime_max is set. */ if (svc_rpc_gss_lifetime_max > 0 && cred_lifetime > svc_rpc_gss_lifetime_max) cred_lifetime = svc_rpc_gss_lifetime_max; client->cl_expiration = time_uptime + cred_lifetime; /* * Fill in cred details in the rawcred structure. */ client->cl_rawcred.version = RPCSEC_GSS_VERSION; rpc_gss_oid_to_mech(mech, &client->cl_rawcred.mechanism); - maj_stat = gss_export_name(&min_stat, client->cl_cname, - &export_name); + maj_stat = GSS_S_COMPLETE; + if (my_krb_imp != KRBIMP_MIT) + maj_stat = gss_export_name(&min_stat, client->cl_cname, + &export_name); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_export_name", client->cl_mech, maj_stat, min_stat); return (FALSE); } client->cl_rawcred.client_principal = mem_alloc(sizeof(*client->cl_rawcred.client_principal) + export_name.length); client->cl_rawcred.client_principal->len = export_name.length; memcpy(client->cl_rawcred.client_principal->name, export_name.value, export_name.length); gss_release_buffer(&min_stat, &export_name); client->cl_rawcred.svc_principal = client->cl_sname->sn_principal; client->cl_rawcred.service = gc->gc_svc; /* * Use gss_pname_to_uid to map to unix creds. For * kerberos5, this uses krb5_aname_to_localname. 
*/ - svc_rpc_gss_build_ucred(client, client->cl_cname); + if (my_krb_imp != KRBIMP_MIT) + svc_rpc_gss_build_ucred(client, client->cl_cname); svc_rpc_gss_set_flavor(client); gss_release_name(&min_stat, &client->cl_cname); #ifdef DEBUG { gss_buffer_desc mechname; gss_oid_to_str(&min_stat, mech, &mechname); rpc_gss_log_debug("accepted context for %s with " "<mech %.*s, qop %d, svc %d>", client->cl_rawcred.client_principal->name, mechname.length, (char *)mechname.value, client->cl_qop, client->cl_rawcred.service); gss_release_buffer(&min_stat, &mechname); } #endif /* DEBUG */ } return (TRUE); } static bool_t svc_rpc_gss_validate(struct svc_rpc_gss_client *client, struct rpc_msg *msg, gss_qop_t *qop, rpc_gss_proc_t gcproc) { struct opaque_auth *oa; gss_buffer_desc rpcbuf, checksum; OM_uint32 maj_stat, min_stat; gss_qop_t qop_state; int32_t rpchdr[128 / sizeof(int32_t)]; int32_t *buf; rpc_gss_log_debug("in svc_rpc_gss_validate()"); memset(rpchdr, 0, sizeof(rpchdr)); /* Reconstruct RPC header for signing (from xdr_callmsg). */ buf = rpchdr; IXDR_PUT_LONG(buf, msg->rm_xid); IXDR_PUT_ENUM(buf, msg->rm_direction); IXDR_PUT_LONG(buf, msg->rm_call.cb_rpcvers); IXDR_PUT_LONG(buf, msg->rm_call.cb_prog); IXDR_PUT_LONG(buf, msg->rm_call.cb_vers); IXDR_PUT_LONG(buf, msg->rm_call.cb_proc); oa = &msg->rm_call.cb_cred; IXDR_PUT_ENUM(buf, oa->oa_flavor); IXDR_PUT_LONG(buf, oa->oa_length); if (oa->oa_length) { memcpy((caddr_t)buf, oa->oa_base, oa->oa_length); buf += RNDUP(oa->oa_length) / sizeof(int32_t); } rpcbuf.value = rpchdr; rpcbuf.length = (u_char *)buf - (u_char *)rpchdr; checksum.value = msg->rm_call.cb_verf.oa_base; checksum.length = msg->rm_call.cb_verf.oa_length; maj_stat = gss_verify_mic(&min_stat, client->cl_ctx, &rpcbuf, &checksum, &qop_state); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_verify_mic", client->cl_mech, maj_stat, min_stat); /* * A bug in some versions of the Linux client generates a * Destroy operation with a bogus encrypted checksum. Deleting * the credential handle for that case causes the mount to fail. * Since the checksum is bogus (gss_verify_mic() failed), it * doesn't make sense to destroy the handle and not doing so * fixes the Linux mount. */ if (gcproc != RPCSEC_GSS_DESTROY) client->cl_state = CLIENT_STALE; return (FALSE); } *qop = qop_state; return (TRUE); } static bool_t svc_rpc_gss_nextverf(struct svc_rpc_gss_client *client, struct svc_req *rqst, u_int seq) { gss_buffer_desc signbuf; gss_buffer_desc mic; OM_uint32 maj_stat, min_stat; uint32_t nseq; rpc_gss_log_debug("in svc_rpc_gss_nextverf()"); nseq = htonl(seq); signbuf.value = &nseq; signbuf.length = sizeof(nseq); maj_stat = gss_get_mic(&min_stat, client->cl_ctx, client->cl_qop, &signbuf, &mic); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_get_mic", client->cl_mech, maj_stat, min_stat); client->cl_state = CLIENT_STALE; return (FALSE); } KASSERT(mic.length <= MAX_AUTH_BYTES, ("MIC too large for RPCSEC_GSS")); rqst->rq_verf.oa_flavor = RPCSEC_GSS; rqst->rq_verf.oa_length = mic.length; bcopy(mic.value, rqst->rq_verf.oa_base, mic.length); gss_release_buffer(&min_stat, &mic); return (TRUE); } static bool_t svc_rpc_gss_callback(struct svc_rpc_gss_client *client, struct svc_req *rqst) { struct svc_rpc_gss_callback *scb; rpc_gss_lock_t lock; void *cookie; bool_t cb_res; bool_t result; /* * See if we have a callback for this guy. */ result = TRUE; SLIST_FOREACH(scb, &KGSS_VNET(svc_rpc_gss_callbacks), cb_link) { if (scb->cb_callback.program == rqst->rq_prog && scb->cb_callback.version == rqst->rq_vers) { /* * This one matches. 
Call the callback and see * if it wants to veto or something. */ lock.locked = FALSE; lock.raw_cred = &client->cl_rawcred; cb_res = scb->cb_callback.callback(rqst, client->cl_creds, client->cl_ctx, &lock, &cookie); if (!cb_res) { client->cl_state = CLIENT_STALE; result = FALSE; break; } /* * The callback accepted the connection - it * is responsible for freeing client->cl_creds * now. */ client->cl_creds = GSS_C_NO_CREDENTIAL; client->cl_locked = lock.locked; client->cl_cookie = cookie; return (TRUE); } } /* * Either no callback exists for this program/version or one * of the callbacks rejected the connection. We just need to * clean up the delegated client creds, if any. */ if (client->cl_creds) { OM_uint32 min_ver; gss_release_cred(&min_ver, &client->cl_creds); } return (result); } static bool_t svc_rpc_gss_check_replay(struct svc_rpc_gss_client *client, uint32_t seq) { uint32_t offset; int word, bit; bool_t result; sx_xlock(&client->cl_lock); if (seq <= client->cl_seqlast) { /* * The request sequence number is less than * the largest we have seen so far. If it is * outside the window or if we have seen a * request with this sequence before, silently * discard it. */ offset = client->cl_seqlast - seq; if (offset >= SVC_RPC_GSS_SEQWINDOW) { result = FALSE; goto out; } word = offset / 32; bit = offset % 32; if (client->cl_seqmask[word] & (1 << bit)) { result = FALSE; goto out; } } result = TRUE; out: sx_xunlock(&client->cl_lock); return (result); } static void svc_rpc_gss_update_seq(struct svc_rpc_gss_client *client, uint32_t seq) { int offset, i, word, bit; uint32_t carry, newcarry; sx_xlock(&client->cl_lock); if (seq > client->cl_seqlast) { /* * This request has a sequence number greater * than any we have seen so far. Advance the * seq window and set bit zero of the window * (which corresponds to the new sequence * number) */ offset = seq - client->cl_seqlast; while (offset > 32) { for (i = (SVC_RPC_GSS_SEQWINDOW / 32) - 1; i > 0; i--) { client->cl_seqmask[i] = client->cl_seqmask[i-1]; } client->cl_seqmask[0] = 0; offset -= 32; } carry = 0; for (i = 0; i < SVC_RPC_GSS_SEQWINDOW / 32; i++) { newcarry = client->cl_seqmask[i] >> (32 - offset); client->cl_seqmask[i] = (client->cl_seqmask[i] << offset) | carry; carry = newcarry; } client->cl_seqmask[0] |= 1; client->cl_seqlast = seq; } else { offset = client->cl_seqlast - seq; word = offset / 32; bit = offset % 32; client->cl_seqmask[word] |= (1 << bit); } sx_xunlock(&client->cl_lock); } enum auth_stat svc_rpc_gss(struct svc_req *rqst, struct rpc_msg *msg) { OM_uint32 min_stat; XDR xdrs; struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; struct rpc_gss_cred gc; struct rpc_gss_init_res gr; gss_qop_t qop; int call_stat; enum auth_stat result; KGSS_CURVNET_SET_QUIET(KGSS_TD_TO_VNET(curthread)); rpc_gss_log_debug("in svc_rpc_gss()"); /* Garbage collect old clients. */ svc_rpc_gss_timeout_clients(); /* Initialize reply. */ rqst->rq_verf = _null_auth; /* Deserialize client credentials. */ if (rqst->rq_cred.oa_length <= 0) { KGSS_CURVNET_RESTORE(); return (AUTH_BADCRED); } memset(&gc, 0, sizeof(gc)); xdrmem_create(&xdrs, rqst->rq_cred.oa_base, rqst->rq_cred.oa_length, XDR_DECODE); if (!xdr_rpc_gss_cred(&xdrs, &gc)) { XDR_DESTROY(&xdrs); KGSS_CURVNET_RESTORE(); return (AUTH_BADCRED); } XDR_DESTROY(&xdrs); client = NULL; /* Check version. 
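To make the 128-entry sequence window maintained by svc_rpc_gss_check_replay() and svc_rpc_gss_update_seq() above concrete, here is a small worked example; the numbers are chosen only for illustration.

	/*
	 * Worked example with SVC_RPC_GSS_SEQWINDOW = 128:
	 *
	 *   cl_seqlast = 1000, request seq = 1003
	 *     -> seq > cl_seqlast: shift the mask left by 3, set bit 0,
	 *        cl_seqlast becomes 1003.
	 *   next request seq = 1001
	 *     -> offset = 1003 - 1001 = 2, inside the window and bit 2
	 *        not yet set: accept the request, then set bit 2.
	 *   replayed request seq = 1001
	 *     -> bit 2 already set: silently discarded.
	 *   very old request seq = 800
	 *     -> offset = 203 >= 128: outside the window, discarded.
	 */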
*/ if (gc.gc_version != RPCSEC_GSS_VERSION) { result = AUTH_BADCRED; goto out; } /* Check the proc and find the client (or create it) */ if (gc.gc_proc == RPCSEC_GSS_INIT) { if (gc.gc_handle.length != 0) { result = AUTH_BADCRED; goto out; } client = svc_rpc_gss_create_client(); } else { struct svc_rpc_gss_clientid *p; if (gc.gc_handle.length != sizeof(*p)) { result = AUTH_BADCRED; goto out; } p = gc.gc_handle.value; client = svc_rpc_gss_find_client(p); if (!client) { /* * Can't find the client - we may have * destroyed it - tell the other side to * re-authenticate. */ result = RPCSEC_GSS_CREDPROBLEM; goto out; } } cc = rqst->rq_clntcred; cc->cc_client = client; cc->cc_service = gc.gc_svc; cc->cc_seq = gc.gc_seq; /* * The service and sequence number must be ignored for * RPCSEC_GSS_INIT and RPCSEC_GSS_CONTINUE_INIT. */ if (gc.gc_proc != RPCSEC_GSS_INIT && gc.gc_proc != RPCSEC_GSS_CONTINUE_INIT) { /* * Check for sequence number overflow. */ if (gc.gc_seq >= MAXSEQ) { result = RPCSEC_GSS_CTXPROBLEM; goto out; } /* * Check for valid service. */ if (gc.gc_svc != rpc_gss_svc_none && gc.gc_svc != rpc_gss_svc_integrity && gc.gc_svc != rpc_gss_svc_privacy) { result = AUTH_BADCRED; goto out; } } /* Handle RPCSEC_GSS control procedure. */ switch (gc.gc_proc) { case RPCSEC_GSS_INIT: case RPCSEC_GSS_CONTINUE_INIT: if (rqst->rq_proc != NULLPROC) { result = AUTH_REJECTEDCRED; break; } memset(&gr, 0, sizeof(gr)); if (!svc_rpc_gss_accept_sec_context(client, rqst, &gr, &gc)) { result = AUTH_REJECTEDCRED; break; } if (gr.gr_major == GSS_S_COMPLETE) { /* * We borrow the space for the call verf to * pack our reply verf. */ rqst->rq_verf = msg->rm_call.cb_verf; if (!svc_rpc_gss_nextverf(client, rqst, gr.gr_win)) { result = AUTH_REJECTEDCRED; break; } } else { rqst->rq_verf = _null_auth; } call_stat = svc_sendreply(rqst, (xdrproc_t) xdr_rpc_gss_init_res, (caddr_t) &gr); gss_release_buffer(&min_stat, &gr.gr_token); if (!call_stat) { result = AUTH_FAILED; break; } if (gr.gr_major == GSS_S_COMPLETE) client->cl_state = CLIENT_ESTABLISHED; result = RPCSEC_GSS_NODISPATCH; break; case RPCSEC_GSS_DATA: case RPCSEC_GSS_DESTROY: if (!svc_rpc_gss_check_replay(client, gc.gc_seq)) { result = RPCSEC_GSS_NODISPATCH; break; } if (!svc_rpc_gss_validate(client, msg, &qop, gc.gc_proc)) { result = RPCSEC_GSS_CREDPROBLEM; break; } /* * We borrow the space for the call verf to pack our * reply verf. */ rqst->rq_verf = msg->rm_call.cb_verf; if (!svc_rpc_gss_nextverf(client, rqst, gc.gc_seq)) { result = RPCSEC_GSS_CTXPROBLEM; break; } svc_rpc_gss_update_seq(client, gc.gc_seq); /* * Change the SVCAUTH ops on the request to point at * our own code so that we can unwrap the arguments * and wrap the result. The caller will re-set this on * every request to point to a set of null wrap/unwrap * methods. Acquire an extra reference to the client * which will be released by svc_rpc_gss_release() * after the request has finished processing. */ refcount_acquire(&client->cl_refs); rqst->rq_auth.svc_ah_ops = &svc_auth_gss_ops; rqst->rq_auth.svc_ah_private = cc; if (gc.gc_proc == RPCSEC_GSS_DATA) { /* * We might be ready to do a callback to the server to * see if it wants to accept/reject the connection. 
*/ sx_xlock(&client->cl_lock); if (!client->cl_done_callback) { client->cl_done_callback = TRUE; client->cl_qop = qop; client->cl_rawcred.qop = _rpc_gss_num_to_qop( client->cl_rawcred.mechanism, qop); if (!svc_rpc_gss_callback(client, rqst)) { result = AUTH_REJECTEDCRED; sx_xunlock(&client->cl_lock); break; } } sx_xunlock(&client->cl_lock); /* * If the server has locked this client to a * particular service+qop pair, enforce that * restriction now. */ if (client->cl_locked) { if (client->cl_rawcred.service != gc.gc_svc) { result = AUTH_FAILED; break; } else if (client->cl_qop != qop) { result = AUTH_BADVERF; break; } } /* * If the qop changed, look up the new qop * name for rawcred. */ if (client->cl_qop != qop) { client->cl_qop = qop; client->cl_rawcred.qop = _rpc_gss_num_to_qop( client->cl_rawcred.mechanism, qop); } /* * Make sure we use the right service value * for unwrap/wrap. */ if (client->cl_rawcred.service != gc.gc_svc) { client->cl_rawcred.service = gc.gc_svc; svc_rpc_gss_set_flavor(client); } result = AUTH_OK; } else { if (rqst->rq_proc != NULLPROC) { result = AUTH_REJECTEDCRED; break; } call_stat = svc_sendreply(rqst, (xdrproc_t) xdr_void, (caddr_t) NULL); if (!call_stat) { result = AUTH_FAILED; break; } svc_rpc_gss_forget_client(client); result = RPCSEC_GSS_NODISPATCH; break; } break; default: result = AUTH_BADCRED; break; } out: if (client) svc_rpc_gss_release_client(client); xdr_free((xdrproc_t) xdr_rpc_gss_cred, (char *) &gc); KGSS_CURVNET_RESTORE(); return (result); } static bool_t svc_rpc_gss_wrap(SVCAUTH *auth, struct mbuf **mp) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_wrap()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; if (client->cl_state != CLIENT_ESTABLISHED || cc->cc_service == rpc_gss_svc_none || *mp == NULL) { return (TRUE); } return (xdr_rpc_gss_wrap_data(mp, client->cl_ctx, client->cl_qop, cc->cc_service, cc->cc_seq)); } static bool_t svc_rpc_gss_unwrap(SVCAUTH *auth, struct mbuf **mp) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_unwrap()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; if (client->cl_state != CLIENT_ESTABLISHED || cc->cc_service == rpc_gss_svc_none) { return (TRUE); } return (xdr_rpc_gss_unwrap_data(mp, client->cl_ctx, client->cl_qop, cc->cc_service, cc->cc_seq)); } static void svc_rpc_gss_release(SVCAUTH *auth) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_release()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; svc_rpc_gss_release_client(client); } diff --git a/usr.sbin/Makefile b/usr.sbin/Makefile index c361c1e5866d..51908818e550 100644 --- a/usr.sbin/Makefile +++ b/usr.sbin/Makefile @@ -1,229 +1,231 @@ .include SUBDIR= adduser \ arp \ binmiscctl \ boottrace \ bsdconfig \ camdd \ cdcontrol \ chkgrp \ chown \ chroot \ ckdist \ clear_locks \ crashinfo \ cron \ ctld \ ctladm \ daemon \ dconschat \ devctl \ devinfo \ diskinfo \ dumpcis \ etcupdate \ extattr \ extattrctl \ fifolog \ fstyp \ fwcontrol \ fwget \ getfmac \ getpmac \ gstat \ i2c \ ifmcstat \ iostat \ iovctl \ kldxref \ mailwrapper \ makefs \ memcontrol \ mfiutil \ mixer \ mlxcontrol \ mountd \ mount_smbfs \ mpsutil \ mptutil \ mtest \ newsyslog \ nfscbd \ nfsd \ nfsdumpstate \ nfsrevoke \ nfsuserd \ nmtree \ nologin \ nvmfd \ pciconf \ periodic \ pnfsdscopymr \ pnfsdsfile \ 
pnfsdskill \ powerd \ prometheus_sysctl_exporter \ pstat \ pw \ pwd_mkdb \ pwm \ quot \ rarpd \ rmt \ rpcbind \ rpc.lockd \ rpc.statd \ rpc.umntall \ rtprio \ rwhod \ service \ services_mkdb \ sesutil \ setfib \ setfmac \ setpmac \ smbmsg \ snapinfo \ sndctl \ spi \ spray \ syslogd \ sysrc \ tcpdrop \ tcpdump \ tcpsso \ traceroute \ trim \ tzsetup \ ugidfw \ valectl \ vigr \ vipw \ wake \ watch \ watchdogd \ zdump \ zic \ zonectl # NB: keep these sorted by MK_* knobs SUBDIR.${MK_ACCT}+= accton SUBDIR.${MK_ACCT}+= sa SUBDIR.${MK_AUDIT}+= audit SUBDIR.${MK_AUDIT}+= auditd .if ${MK_OPENSSL} != "no" SUBDIR.${MK_AUDIT}+= auditdistd .endif SUBDIR.${MK_AUDIT}+= auditreduce SUBDIR.${MK_AUDIT}+= praudit SUBDIR.${MK_AUTHPF}+= authpf SUBDIR.${MK_AUTOFS}+= autofs SUBDIR.${MK_BLACKLIST}+= blacklistctl SUBDIR.${MK_BLACKLIST}+= blacklistd SUBDIR.${MK_BLUETOOTH}+= bluetooth SUBDIR.${MK_BOOTPARAMD}+= bootparamd SUBDIR.${MK_BSDINSTALL}+= bsdinstall SUBDIR.${MK_BSNMP}+= bsnmpd .if ${MK_CAROOT} != "no" SUBDIR.${MK_OPENSSL}+= certctl .endif SUBDIR.${MK_CXGBETOOL}+= cxgbetool SUBDIR.${MK_EFI}+= efivar efidp efibootmgr efitable efiwake .if ${MK_OPENSSL} != "no" SUBDIR.${MK_EFI}+= uefisign .endif SUBDIR.${MK_FDT}+= ofwdump SUBDIR.${MK_FLOPPY}+= fdcontrol SUBDIR.${MK_FLOPPY}+= fdformat SUBDIR.${MK_FLOPPY}+= fdread SUBDIR.${MK_FLOPPY}+= fdwrite SUBDIR.${MK_FREEBSD_UPDATE}+= freebsd-update +.if ${MK_KERBEROS_SUPPORT} != "no" SUBDIR.${MK_GSSAPI}+= gssd +.endif SUBDIR.${MK_GPIO}+= gpioctl SUBDIR.${MK_HYPERV}+= hyperv SUBDIR.${MK_INET6}+= ip6addrctl SUBDIR.${MK_INET6}+= mld6query SUBDIR.${MK_INET6}+= ndp SUBDIR.${MK_INET6}+= rip6query SUBDIR.${MK_INET6}+= route6d SUBDIR.${MK_INET6}+= rrenumd SUBDIR.${MK_INET6}+= rtadvctl SUBDIR.${MK_INET6}+= rtadvd SUBDIR.${MK_INET6}+= rtsold SUBDIR.${MK_INET6}+= traceroute6 SUBDIR.${MK_INETD}+= inetd SUBDIR.${MK_IPFW}+= ipfwpcap SUBDIR.${MK_ISCSI}+= iscsid SUBDIR.${MK_JAIL}+= jail SUBDIR.${MK_JAIL}+= jexec SUBDIR.${MK_JAIL}+= jls # XXX MK_SYSCONS SUBDIR.${MK_LEGACY_CONSOLE}+= kbdcontrol SUBDIR.${MK_LEGACY_CONSOLE}+= kbdmap SUBDIR.${MK_LEGACY_CONSOLE}+= moused SUBDIR.${MK_LEGACY_CONSOLE}+= vidcontrol SUBDIR.${MK_PPP}+= pppctl SUBDIR.${MK_NS_CACHING}+= nscd SUBDIR.${MK_LPR}+= lpr SUBDIR.${MK_MAN_UTILS}+= manctl SUBDIR.${MK_MLX5TOOL}+= mlx5tool SUBDIR.${MK_NETGRAPH}+= flowctl SUBDIR.${MK_NETGRAPH}+= ngctl SUBDIR.${MK_NETGRAPH}+= nghook SUBDIR.${MK_NIS}+= rpc.yppasswdd SUBDIR.${MK_NIS}+= rpc.ypupdated SUBDIR.${MK_NIS}+= rpc.ypxfrd SUBDIR.${MK_NIS}+= ypbind SUBDIR.${MK_NIS}+= ypldap SUBDIR.${MK_NIS}+= yp_mkdb SUBDIR.${MK_NIS}+= yppoll SUBDIR.${MK_NIS}+= yppush SUBDIR.${MK_NIS}+= ypserv SUBDIR.${MK_NIS}+= ypset SUBDIR.${MK_NTP}+= ntp SUBDIR.${MK_OPENSSL_KTLS}+= rpc.tlsclntd SUBDIR.${MK_OPENSSL_KTLS}+= rpc.tlsservd SUBDIR.${MK_PF}+= ftp-proxy SUBDIR.${MK_PKGBOOTSTRAP}+= pkg SUBDIR.${MK_PMC}+= pmc pmcannotate pmccontrol pmcstat pmcstudy SUBDIR.${MK_PPP}+= ppp SUBDIR.${MK_QUOTAS}+= edquota SUBDIR.${MK_QUOTAS}+= quotaon SUBDIR.${MK_QUOTAS}+= repquota SUBDIR.${MK_SENDMAIL}+= editmap SUBDIR.${MK_SENDMAIL}+= mailstats SUBDIR.${MK_SENDMAIL}+= makemap SUBDIR.${MK_SENDMAIL}+= praliases SUBDIR.${MK_SENDMAIL}+= sendmail SUBDIR.${MK_TCP_WRAPPERS}+= tcpdchk SUBDIR.${MK_TCP_WRAPPERS}+= tcpdmatch SUBDIR.${MK_TOOLCHAIN}+= config SUBDIR.${MK_TOOLCHAIN}+= crunch SUBDIR.${MK_UNBOUND}+= unbound SUBDIR.${MK_USB}+= uathload SUBDIR.${MK_USB}+= uhsoctl SUBDIR.${MK_USB}+= usbconfig SUBDIR.${MK_USB}+= usbdump SUBDIR.${MK_UTMPX}+= ac SUBDIR.${MK_UTMPX}+= lastlogin SUBDIR.${MK_UTMPX}+= utx SUBDIR.${MK_WIRELESS}+= 
wlandebug SUBDIR.${MK_WIRELESS}+= wlanstats SUBDIR.${MK_WIRELESS}+= wpa SUBDIR.${MK_TESTS}+= tests .include SUBDIR_PARALLEL= # Add architecture-specific manpages # to be included anyway MAN= apmd/apmd.8 \ nvram/nvram.8 .include .include diff --git a/usr.sbin/gssd/Makefile b/usr.sbin/gssd/Makefile index 336a1b49f696..a4ac035ae476 100644 --- a/usr.sbin/gssd/Makefile +++ b/usr.sbin/gssd/Makefile @@ -1,41 +1,37 @@ .include PACKAGE= gssd PROG= gssd MAN= gssd.8 SRCS= gssd.c gssd.h gssd_svc.c gssd_xdr.c gssd_prot.c CFLAGS+= -I. WARNS?= 1 -.if ${MK_KERBEROS_SUPPORT} != "no" .if ${MK_MITKRB5} != "no" # MIT KRB5 LIBADD+= gssapi_krb5 krb5 k5crypto krb5profile krb5support CFLAGS+= -DMK_MITKRB5=yes .else # Heimdal LIBADD+= gssapi krb5 roken .endif -.else -CFLAGS+= -DWITHOUT_KERBEROS -.endif CLEANFILES= gssd_svc.c gssd_xdr.c gssd.h RPCSRC= ${SRCTOP}/sys/kgssapi/gssd.x RPCGEN= RPCGEN_CPP=${CPP:Q} rpcgen -L -C -M gssd_svc.c: ${RPCSRC} gssd.h ${RPCGEN} -m -o ${.TARGET} ${RPCSRC} gssd_xdr.c: ${RPCSRC} gssd.h ${RPCGEN} -c -o ${.TARGET} ${RPCSRC} gssd.h: ${RPCSRC} ${RPCGEN} -h -o ${.TARGET} ${RPCSRC} .PATH: ${SRCTOP}/sys/kgssapi .include diff --git a/usr.sbin/gssd/gssd.c b/usr.sbin/gssd/gssd.c index 2a3af05496cf..54d2062dd29a 100644 --- a/usr.sbin/gssd/gssd.c +++ b/usr.sbin/gssd/gssd.c @@ -1,1279 +1,1718 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#ifndef WITHOUT_KERBEROS #include -#endif #include #include #include #include #include #include #include #include #include #include #include #ifdef MK_MITKRB5 #include #endif #include #include #include "gssd.h" #ifndef _PATH_GSS_MECH #define _PATH_GSS_MECH "/etc/gss/mech" #endif #define GSSD_CREDENTIAL_CACHE_FILE "/tmp/krb5cc_gssd" struct gss_resource { LIST_ENTRY(gss_resource) gr_link; uint64_t gr_id; /* identifier exported to kernel */ void* gr_res; /* GSS-API resource pointer */ }; LIST_HEAD(gss_resource_list, gss_resource) gss_resources; int gss_resource_count; uint32_t gss_next_id; uint32_t gss_start_time; int debug_level; static char ccfile_dirlist[PATH_MAX + 1], ccfile_substring[NAME_MAX + 1]; static char pref_realm[1024]; static int verbose; static int hostbased_initiator_cred; -#ifndef WITHOUT_KERBEROS /* 1.2.752.43.13.14 */ static gss_OID_desc gss_krb5_set_allowable_enctypes_x_desc = {6, (void *) "\x2a\x85\x70\x2b\x0d\x0e"}; static gss_OID GSS_KRB5_SET_ALLOWABLE_ENCTYPES_X = &gss_krb5_set_allowable_enctypes_x_desc; static gss_OID_desc gss_krb5_mech_oid_x_desc = {9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" }; static gss_OID GSS_KRB5_MECH_OID_X = &gss_krb5_mech_oid_x_desc; -#endif static void gssd_load_mech(void); static int find_ccache_file(const char *, uid_t, char *); static int is_a_valid_tgt_cache(const char *, uid_t, int *, time_t *); static void gssd_verbose_out(const char *, ...); -#ifndef WITHOUT_KERBEROS static krb5_error_code gssd_get_cc_from_keytab(const char *); static OM_uint32 gssd_get_user_cred(OM_uint32 *, uid_t, gss_cred_id_t *); -#endif void gssd_terminate(int); extern void gssd_1(struct svc_req *rqstp, SVCXPRT *transp); int main(int argc, char **argv) { /* * We provide an RPC service on a Netlink socket. The kernel's GSS API * code will multicast its calls, we will listen to them, receive them, * process them and reply. */ int oldmask, ch, debug, jailed; SVCXPRT *xprt; size_t jailed_size; /* * Initialize the credential cache file name substring and the * search directory list. */ strlcpy(ccfile_substring, "krb5cc_", sizeof(ccfile_substring)); ccfile_dirlist[0] = '\0'; pref_realm[0] = '\0'; debug = 0; verbose = 0; while ((ch = getopt(argc, argv, "dhvs:c:r:")) != -1) { switch (ch) { case 'd': debug_level++; break; case 'h': -#ifndef WITHOUT_KERBEROS /* * Enable use of a host based initiator credential * in the default keytab file. */ hostbased_initiator_cred = 1; -#else - errx(1, "This option not available when built" - " without MK_KERBEROS\n"); -#endif break; case 'v': verbose = 1; break; case 's': -#ifndef WITHOUT_KERBEROS /* * Set the directory search list. This enables use of * find_ccache_file() to search the directories for a * suitable credentials cache file. */ strlcpy(ccfile_dirlist, optarg, sizeof(ccfile_dirlist)); -#else - errx(1, "This option not available when built" - " without MK_KERBEROS\n"); -#endif break; case 'c': /* * Specify a non-default credential cache file * substring. */ strlcpy(ccfile_substring, optarg, sizeof(ccfile_substring)); break; case 'r': /* * Set the preferred realm for the credential cache tgt. 
*/ strlcpy(pref_realm, optarg, sizeof(pref_realm)); break; default: fprintf(stderr, "usage: %s [-d] [-s dir-list] [-c file-substring]" " [-r preferred-realm]\n", argv[0]); exit(1); break; } } gssd_load_mech(); if (!debug_level) { if (daemon(0, 0) != 0) err(1, "Can't daemonize"); signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); signal(SIGHUP, SIG_IGN); } signal(SIGTERM, gssd_terminate); signal(SIGPIPE, gssd_terminate); if ((xprt = svc_nl_create("kgss")) == NULL) { if (debug_level == 0) { syslog(LOG_ERR, "Can't create transport for local gssd socket"); exit(1); } err(1, "Can't create transport for local gssd socket"); } if (!svc_reg(xprt, GSSD, GSSDVERS, gssd_1, NULL)) { if (debug_level == 0) { syslog(LOG_ERR, "Can't register service for local gssd socket"); exit(1); } err(1, "Can't register service for local gssd socket"); } LIST_INIT(&gss_resources); gss_next_id = 1; gss_start_time = time(0); svc_run(); return (0); } static void gssd_load_mech(void) { FILE *fp; char buf[256]; char *p; char *name, *oid, *lib, *kobj; fp = fopen(_PATH_GSS_MECH, "r"); if (!fp) return; while (fgets(buf, sizeof(buf), fp)) { if (*buf == '#') continue; p = buf; name = strsep(&p, "\t\n "); if (p) while (isspace(*p)) p++; oid = strsep(&p, "\t\n "); if (p) while (isspace(*p)) p++; lib = strsep(&p, "\t\n "); if (p) while (isspace(*p)) p++; kobj = strsep(&p, "\t\n "); if (!name || !oid || !lib || !kobj) continue; if (strcmp(kobj, "-")) { /* * Attempt to load the kernel module if its * not already present. */ if (modfind(kobj) < 0) { if (kldload(kobj) < 0) { fprintf(stderr, "%s: can't find or load kernel module %s for %s\n", getprogname(), kobj, name); } } } } fclose(fp); } static void * gssd_find_resource(uint64_t id) { struct gss_resource *gr; if (!id) return (NULL); LIST_FOREACH(gr, &gss_resources, gr_link) if (gr->gr_id == id) return (gr->gr_res); return (NULL); } static uint64_t gssd_make_resource(void *res) { struct gss_resource *gr; if (!res) return (0); gr = malloc(sizeof(struct gss_resource)); if (!gr) return (0); gr->gr_id = (gss_next_id++) + ((uint64_t) gss_start_time << 32); gr->gr_res = res; LIST_INSERT_HEAD(&gss_resources, gr, gr_link); gss_resource_count++; if (debug_level > 1) printf("%d resources allocated\n", gss_resource_count); return (gr->gr_id); } static void gssd_delete_resource(uint64_t id) { struct gss_resource *gr; LIST_FOREACH(gr, &gss_resources, gr_link) { if (gr->gr_id == id) { LIST_REMOVE(gr, gr_link); free(gr); gss_resource_count--; if (debug_level > 1) printf("%d resources allocated\n", gss_resource_count); return; } } } static void gssd_verbose_out(const char *fmt, ...) 
{ va_list ap; if (verbose != 0) { va_start(ap, fmt); if (debug_level == 0) vsyslog(LOG_INFO | LOG_DAEMON, fmt, ap); else vfprintf(stderr, fmt, ap); va_end(ap); } } bool_t gssd_null_1_svc(void *argp, void *result, struct svc_req *rqstp) { gssd_verbose_out("gssd_null: done\n"); return (TRUE); } +#ifndef MK_MITKRB5 bool_t gssd_init_sec_context_1_svc(init_sec_context_args *argp, init_sec_context_res *result, struct svc_req *rqstp) { gss_cred_id_t cred = GSS_C_NO_CREDENTIAL; gss_ctx_id_t ctx = GSS_C_NO_CONTEXT; gss_name_t name = GSS_C_NO_NAME; char ccname[PATH_MAX + 5 + 1], *cp, *cp2; int gotone, gotcred; OM_uint32 min_stat; -#ifndef WITHOUT_KERBEROS gss_buffer_desc principal_desc; char enctype[sizeof(uint32_t)]; int key_enctype; OM_uint32 maj_stat; -#endif memset(result, 0, sizeof(*result)); if (hostbased_initiator_cred != 0 && argp->cred != 0 && argp->uid == 0) { /* * These credentials are for a host based initiator name * in a keytab file, which should now have credentials * in /tmp/krb5cc_gssd, because gss_acquire_cred() did * the equivalent of "kinit -k". */ snprintf(ccname, sizeof(ccname), "FILE:%s", GSSD_CREDENTIAL_CACHE_FILE); } else if (ccfile_dirlist[0] != '\0' && argp->cred == 0) { /* * For the "-s" case and no credentials provided as an * argument, search the directory list for an appropriate * credential cache file. If the search fails, return failure. */ gotone = 0; cp = ccfile_dirlist; do { cp2 = strchr(cp, ':'); if (cp2 != NULL) *cp2 = '\0'; gotone = find_ccache_file(cp, argp->uid, ccname); if (gotone != 0) break; if (cp2 != NULL) *cp2++ = ':'; cp = cp2; } while (cp != NULL && *cp != '\0'); if (gotone == 0) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_init_sec_context: -s no" " credential cache file found for uid=%d\n", (int)argp->uid); return (TRUE); } } else { /* * If there wasn't a "-s" option or the credentials have * been provided as an argument, do it the old way. * When credentials are provided, the uid should be root. 
*/ if (argp->cred != 0 && argp->uid != 0) { if (debug_level == 0) syslog(LOG_ERR, "gss_init_sec_context:" " cred for non-root"); else fprintf(stderr, "gss_init_sec_context:" " cred for non-root\n"); } snprintf(ccname, sizeof(ccname), "FILE:/tmp/krb5cc_%d", (int) argp->uid); } setenv("KRB5CCNAME", ccname, TRUE); if (argp->cred) { cred = gssd_find_resource(argp->cred); if (!cred) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_init_sec_context: cred" " resource not found\n"); return (TRUE); } } if (argp->ctx) { ctx = gssd_find_resource(argp->ctx); if (!ctx) { result->major_status = GSS_S_CONTEXT_EXPIRED; gssd_verbose_out("gssd_init_sec_context: context" " resource not found\n"); return (TRUE); } } if (argp->name) { name = gssd_find_resource(argp->name); if (!name) { result->major_status = GSS_S_BAD_NAME; gssd_verbose_out("gssd_init_sec_context: name" " resource not found\n"); return (TRUE); } } gotcred = 0; result->major_status = gss_init_sec_context(&result->minor_status, cred, &ctx, name, argp->mech_type, argp->req_flags, argp->time_req, argp->input_chan_bindings, &argp->input_token, &result->actual_mech_type, &result->output_token, &result->ret_flags, &result->time_rec); gssd_verbose_out("gssd_init_sec_context: done major=0x%x minor=%d" " uid=%d\n", (unsigned int)result->major_status, (int)result->minor_status, (int)argp->uid); if (gotcred != 0) gss_release_cred(&min_stat, &cred); if (result->major_status == GSS_S_COMPLETE || result->major_status == GSS_S_CONTINUE_NEEDED) { if (argp->ctx) result->ctx = argp->ctx; else result->ctx = gssd_make_resource(ctx); } return (TRUE); } +bool_t +gssd_supports_lucid_1_svc(void *argp, supports_lucid_res *result, struct svc_req *rqstp) +{ + + gssd_verbose_out("gssd_lucid: done\n"); + result->major_status = GSS_S_UNAVAILABLE; + return (TRUE); +} + +bool_t +gssd_init_sec_context_lucid_v1_1_svc(init_sec_context_lucid_v1_args *argp, + init_sec_context_lucid_v1_res *result, struct svc_req *rqstp) +{ + + gssd_verbose_out("gssd_init_sec_context_lucid_v1: Heimdal\n"); + result->major_status = GSS_S_UNAVAILABLE; + return (TRUE); +} + +bool_t +gssd_accept_sec_context_lucid_v1_1_svc(accept_sec_context_lucid_v1_args *argp, + accept_sec_context_lucid_v1_res *result, struct svc_req *rqstp) +{ + + gssd_verbose_out("gssd_accept_sec_context_lucid_v1: Heimdal\n"); + result->major_status = GSS_S_UNAVAILABLE; + return (TRUE); +} + bool_t gssd_accept_sec_context_1_svc(accept_sec_context_args *argp, accept_sec_context_res *result, struct svc_req *rqstp) { gss_ctx_id_t ctx = GSS_C_NO_CONTEXT; gss_cred_id_t cred = GSS_C_NO_CREDENTIAL; gss_name_t src_name; gss_cred_id_t delegated_cred_handle; memset(result, 0, sizeof(*result)); if (argp->ctx) { ctx = gssd_find_resource(argp->ctx); if (!ctx) { result->major_status = GSS_S_CONTEXT_EXPIRED; gssd_verbose_out("gssd_accept_sec_context: ctx" " resource not found\n"); return (TRUE); } } if (argp->cred) { cred = gssd_find_resource(argp->cred); if (!cred) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_accept_sec_context: cred" " resource not found\n"); return (TRUE); } } memset(result, 0, sizeof(*result)); result->major_status = gss_accept_sec_context(&result->minor_status, &ctx, cred, &argp->input_token, argp->input_chan_bindings, &src_name, &result->mech_type, &result->output_token, &result->ret_flags, &result->time_rec, &delegated_cred_handle); gssd_verbose_out("gssd_accept_sec_context: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if 
(result->major_status == GSS_S_COMPLETE || result->major_status == GSS_S_CONTINUE_NEEDED) { if (argp->ctx) result->ctx = argp->ctx; else result->ctx = gssd_make_resource(ctx); result->src_name = gssd_make_resource(src_name); result->delegated_cred_handle = gssd_make_resource(delegated_cred_handle); } return (TRUE); } +#else /* MK_MITKRB5 */ +bool_t +gssd_supports_lucid_1_svc(void *argp, supports_lucid_res *result, struct svc_req *rqstp) +{ + + gssd_verbose_out("gssd_lucid: done\n"); + result->vers = 1; + result->major_status = GSS_S_COMPLETE; + return (TRUE); +} + +bool_t +gssd_init_sec_context_1_svc(init_sec_context_args *argp, + init_sec_context_res *result, struct svc_req *rqstp) +{ + + gssd_verbose_out("gssd_init_sec_context: MIT\n"); + result->major_status = GSS_S_UNAVAILABLE; + return (TRUE); +} + +bool_t +gssd_accept_sec_context_1_svc(accept_sec_context_args *argp, + accept_sec_context_res *result, struct svc_req *rqstp) +{ + + gssd_verbose_out("gssd_accept_sec_context: MIT\n"); + result->major_status = GSS_S_UNAVAILABLE; + return (TRUE); +} + +bool_t +gssd_init_sec_context_lucid_v1_1_svc(init_sec_context_lucid_v1_args *argp, + init_sec_context_lucid_v1_res *result, struct svc_req *rqstp) +{ + gss_cred_id_t cred = GSS_C_NO_CREDENTIAL; + gss_ctx_id_t ctx = GSS_C_NO_CONTEXT; + gss_name_t name = GSS_C_NO_NAME; + char ccname[PATH_MAX + 5 + 1], *cp, *cp2; + int gotone, gotcred; + OM_uint32 min_stat; + gss_buffer_desc principal_desc; + char enctype[sizeof(uint32_t)]; + int key_enctype; + OM_uint32 maj_stat; + + memset(result, 0, sizeof(*result)); + if (hostbased_initiator_cred != 0 && argp->cred != 0 && + argp->uid == 0) { + /* + * These credentials are for a host based initiator name + * in a keytab file, which should now have credentials + * in /tmp/krb5cc_gssd, because gss_acquire_cred() did + * the equivalent of "kinit -k". + */ + snprintf(ccname, sizeof(ccname), "FILE:%s", + GSSD_CREDENTIAL_CACHE_FILE); + } else if (ccfile_dirlist[0] != '\0' && argp->cred == 0) { + /* + * For the "-s" case and no credentials provided as an + * argument, search the directory list for an appropriate + * credential cache file. If the search fails, return failure. + */ + gotone = 0; + cp = ccfile_dirlist; + do { + cp2 = strchr(cp, ':'); + if (cp2 != NULL) + *cp2 = '\0'; + gotone = find_ccache_file(cp, argp->uid, ccname); + if (gotone != 0) + break; + if (cp2 != NULL) + *cp2++ = ':'; + cp = cp2; + } while (cp != NULL && *cp != '\0'); + if (gotone == 0) { + result->major_status = GSS_S_CREDENTIALS_EXPIRED; + gssd_verbose_out("gssd_init_sec_context_plus: -s no" + " credential cache file found for uid=%d\n", + (int)argp->uid); + return (TRUE); + } + } else { + /* + * If there wasn't a "-s" option or the credentials have + * been provided as an argument, do it the old way. + * When credentials are provided, the uid should be root. 
+ */ + if (argp->cred != 0 && argp->uid != 0) { + if (debug_level == 0) + syslog(LOG_ERR, "gss_init_sec_context_plus:" + " cred for non-root"); + else + fprintf(stderr, "gss_init_sec_context_plus:" + " cred for non-root\n"); + } + snprintf(ccname, sizeof(ccname), "FILE:/tmp/krb5cc_%d", + (int) argp->uid); + } + setenv("KRB5CCNAME", ccname, TRUE); + + if (argp->cred) { + cred = gssd_find_resource(argp->cred); + if (!cred) { + result->major_status = GSS_S_CREDENTIALS_EXPIRED; + gssd_verbose_out("gssd_init_sec_context_plus: cred" + " resource not found\n"); + return (TRUE); + } + } + if (argp->ctx) { + ctx = gssd_find_resource(argp->ctx); + if (!ctx) { + result->major_status = GSS_S_CONTEXT_EXPIRED; + gssd_verbose_out("gssd_init_sec_context_plus: context" + " resource not found\n"); + return (TRUE); + } + } + if (argp->name) { + name = gssd_find_resource(argp->name); + if (!name) { + result->major_status = GSS_S_BAD_NAME; + gssd_verbose_out("gssd_init_sec_context_plus: name" + " resource not found\n"); + return (TRUE); + } + } + gotcred = 0; + + result->major_status = gss_init_sec_context(&result->minor_status, + cred, &ctx, name, argp->mech_type, + argp->req_flags, argp->time_req, argp->input_chan_bindings, + &argp->input_token, &result->actual_mech_type, + &result->output_token, &result->ret_flags, &result->time_rec); + gssd_verbose_out("gssd_init_sec_context_plus: done major=0x%x minor=%d" + " uid=%d\n", (unsigned int)result->major_status, + (int)result->minor_status, (int)argp->uid); + if (gotcred != 0) + gss_release_cred(&min_stat, &cred); + + if (result->actual_mech_type) { + /* + * Just to keep the bogus "elements" pointer + * from core dumping the daemon when linked to MIT + * libraries. For some reason, the "elements" pointer + * in actual_mech_type cannot be read. 
+ */ + result->actual_mech_type = GSS_KRB5_MECH_OID_X; + } + + if (result->major_status == GSS_S_COMPLETE + || result->major_status == GSS_S_CONTINUE_NEEDED) { + if (argp->ctx) + result->ctx = argp->ctx; + else + result->ctx = gssd_make_resource(ctx); + } + + if (result->major_status == GSS_S_COMPLETE) { + gss_krb5_lucid_context_v1_t *lctx; + + result->major_status = gss_krb5_export_lucid_sec_context( + &result->minor_status, &ctx, 1, (void *)&lctx); + gssd_delete_resource(result->ctx); + if (result->major_status == GSS_S_COMPLETE && + lctx != NULL) { + result->lucid.initiate = lctx->initiate; + result->lucid.endtime = lctx->endtime; + result->lucid.send_seq = lctx->send_seq; + result->lucid.recv_seq = lctx->recv_seq; + result->lucid.protocol = lctx->protocol; + if (lctx->protocol == 0) { + result->lucid.rfc_sign = + lctx->rfc1964_kd.sign_alg; + result->lucid.rfc_seal = + lctx->rfc1964_kd.seal_alg; + result->lucid.ctx_type = + lctx->rfc1964_kd.ctx_key.type; + result->lucid.ctx_key.length = + lctx->rfc1964_kd.ctx_key.length; + result->lucid.ctx_key.value = + mem_alloc(result->lucid.ctx_key.length); + memcpy(result->lucid.ctx_key.value, + lctx->rfc1964_kd.ctx_key.data, + result->lucid.ctx_key.length); + } else if (lctx->protocol == 1) { + result->lucid.have_subkey = + lctx->cfx_kd.have_acceptor_subkey; + result->lucid.ctx_type = + lctx->cfx_kd.ctx_key.type; + result->lucid.ctx_key.length = + lctx->cfx_kd.ctx_key.length; + result->lucid.ctx_key.value = + mem_alloc(result->lucid.ctx_key.length); + memcpy(result->lucid.ctx_key.value, + lctx->cfx_kd.ctx_key.data, + result->lucid.ctx_key.length); + if (result->lucid.have_subkey != 0) { + result->lucid.subkey_type = + lctx->cfx_kd.acceptor_subkey.type; + result->lucid.subkey_key.length = + lctx->cfx_kd.acceptor_subkey.length; + result->lucid.subkey_key.value = + mem_alloc( + result->lucid.subkey_key.length); + memcpy(result->lucid.subkey_key.value, + lctx->cfx_kd.acceptor_subkey.data, + result->lucid.subkey_key.length); + } else { + result->lucid.subkey_type = 0; + result->lucid.subkey_key.length = 0; + result->lucid.subkey_key.value = NULL; + } + } + (void)gss_krb5_free_lucid_sec_context(&min_stat, + (void *)lctx); + } else { + gssd_verbose_out("gss_krb5_export_lucid_set_context" + " failed: major=0x%x minor=%d lctx=%p\n", + result->major_status, result->minor_status, lctx); + } + } + + return (TRUE); +} + +/* + * Internal function to acquire unix credentials. 
+ */ +static OM_uint32 +_gss_get_unix_cred(OM_uint32 *minor_stat, gss_name_t name, gss_OID mech, + uid_t *uidp, gid_t *gidp, int *numgroups, gid_t *groups) +{ + OM_uint32 major_stat; + uid_t uid; + char buf[1024], *bufp; + struct passwd pwd, *pw; + size_t buflen; + int error; + static size_t buflen_hint = 1024; + + major_stat = gss_pname_to_uid(minor_stat, name, mech, &uid); + if (major_stat == GSS_S_COMPLETE) { + *uidp = uid; + buflen = buflen_hint; + for (;;) { + pw = NULL; + bufp = buf; + if (buflen > sizeof(buf)) + bufp = malloc(buflen); + if (bufp == NULL) + break; + error = getpwuid_r(uid, &pwd, bufp, buflen, + &pw); + if (error != ERANGE) + break; + if (buflen > sizeof(buf)) + free(bufp); + buflen += 1024; + if (buflen > buflen_hint) + buflen_hint = buflen; + } + if (pw) { + *gidp = pw->pw_gid; + getgrouplist(pw->pw_name, pw->pw_gid, + groups, numgroups); + } else { + major_stat = GSS_S_FAILURE; + gssd_verbose_out("get_unix_cred: cannot find" + " passwd entry\n"); + } + if (bufp != NULL && buflen > sizeof(buf)) + free(bufp); + } else if (major_stat != GSS_S_UNAVAILABLE) { + gssd_verbose_out("gssd_pname_to_uid: failed major=0x%x" + " minor=%d\n", major_stat, *minor_stat); + } + return (major_stat); +} + +bool_t +gssd_accept_sec_context_lucid_v1_1_svc(accept_sec_context_lucid_v1_args *argp, + accept_sec_context_lucid_v1_res *result, struct svc_req *rqstp) +{ + gss_ctx_id_t ctx = GSS_C_NO_CONTEXT; + gss_cred_id_t cred = GSS_C_NO_CREDENTIAL; + gss_name_t src_name; + gss_cred_id_t delegated_cred_handle; + OM_uint32 min_stat; + + memset(result, 0, sizeof(*result)); + if (argp->ctx) { + ctx = gssd_find_resource(argp->ctx); + if (!ctx) { + result->major_status = GSS_S_CONTEXT_EXPIRED; + gssd_verbose_out("gssd_accept_sec_context: ctx" + " resource not found\n"); + return (TRUE); + } + } + if (argp->cred) { + cred = gssd_find_resource(argp->cred); + if (!cred) { + result->major_status = GSS_S_CREDENTIALS_EXPIRED; + gssd_verbose_out("gssd_accept_sec_context: cred" + " resource not found\n"); + return (TRUE); + } + } + + memset(result, 0, sizeof(*result)); + result->major_status = gss_accept_sec_context(&result->minor_status, + &ctx, cred, &argp->input_token, argp->input_chan_bindings, + &src_name, &result->mech_type, &result->output_token, + &result->ret_flags, &result->time_rec, + &delegated_cred_handle); + gssd_verbose_out("gssd_accept_sec_context: done major=0x%x minor=%d\n", + (unsigned int)result->major_status, (int)result->minor_status); + + if (result->major_status == GSS_S_COMPLETE + || result->major_status == GSS_S_CONTINUE_NEEDED) { + if (argp->ctx) + result->ctx = argp->ctx; + else + result->ctx = gssd_make_resource(ctx); + result->src_name = gssd_make_resource(src_name); + result->delegated_cred_handle = + gssd_make_resource(delegated_cred_handle); + } + + if (result->major_status == GSS_S_COMPLETE) { + gss_krb5_lucid_context_v1_t *lctx; + + /* Get the lucid context stuff. 
*/ + result->major_status = gss_krb5_export_lucid_sec_context( + &result->minor_status, &ctx, 1, (void *)&lctx); + gssd_delete_resource(result->ctx); + if (result->major_status == GSS_S_COMPLETE && + lctx != NULL) { + result->lucid.initiate = lctx->initiate; + result->lucid.endtime = lctx->endtime; + result->lucid.send_seq = lctx->send_seq; + result->lucid.recv_seq = lctx->recv_seq; + result->lucid.protocol = lctx->protocol; + if (lctx->protocol == 0) { + result->lucid.rfc_sign = + lctx->rfc1964_kd.sign_alg; + result->lucid.rfc_seal = + lctx->rfc1964_kd.seal_alg; + result->lucid.ctx_type = + lctx->rfc1964_kd.ctx_key.type; + result->lucid.ctx_key.length = + lctx->rfc1964_kd.ctx_key.length; + result->lucid.ctx_key.value = + mem_alloc(result->lucid.ctx_key.length); + memcpy(result->lucid.ctx_key.value, + lctx->rfc1964_kd.ctx_key.data, + result->lucid.ctx_key.length); + } else if (lctx->protocol == 1) { + result->lucid.have_subkey = + lctx->cfx_kd.have_acceptor_subkey; + result->lucid.ctx_type = + lctx->cfx_kd.ctx_key.type; + result->lucid.ctx_key.length = + lctx->cfx_kd.ctx_key.length; + result->lucid.ctx_key.value = + mem_alloc(result->lucid.ctx_key.length); + memcpy(result->lucid.ctx_key.value, + lctx->cfx_kd.ctx_key.data, + result->lucid.ctx_key.length); + if (result->lucid.have_subkey != 0) { + result->lucid.subkey_type = + lctx->cfx_kd.acceptor_subkey.type; + result->lucid.subkey_key.length = + lctx->cfx_kd.acceptor_subkey.length; + result->lucid.subkey_key.value = + mem_alloc( + result->lucid.subkey_key.length); + memcpy(result->lucid.subkey_key.value, + lctx->cfx_kd.acceptor_subkey.data, + result->lucid.subkey_key.length); + } else { + result->lucid.subkey_type = 0; + result->lucid.subkey_key.length = 0; + result->lucid.subkey_key.value = NULL; + } + } + (void)gss_krb5_free_lucid_sec_context(&min_stat, + (void *)lctx); + } else { + gssd_verbose_out("gss_krb5_export_lucid_set_context" + " failed: major=0x%x minor=%d lctx=%p\n", + result->major_status, result->minor_status, lctx); + } + + /* Now, get the exported name. */ + if (result->major_status == GSS_S_COMPLETE) { + result->major_status = gss_export_name( + &result->minor_status, src_name, + &result->exported_name); + gssd_verbose_out("gssd_accept_sec_context (name):" + " done major=0x%x minor=%d\n", + result->major_status, result->minor_status); + } + + /* Finally, get the unix credentials. */ + if (result->major_status == GSS_S_COMPLETE) { + gid_t groups[NGROUPS]; + int i, len = NGROUPS; + OM_uint32 major_stat, minor_stat; + + major_stat = _gss_get_unix_cred(&minor_stat, + src_name, result->mech_type, + &result->uid, &result->gid, &len, groups); + if (major_stat == GSS_S_COMPLETE) { + result->gidlist.gidlist_len = len; + result->gidlist.gidlist_val = + mem_alloc(len * sizeof(uint32_t)); + /* + * Just in case + * sizeof(gid_t) != sizeof(uint32_t). 
+ */ + for (i = 0; i < len; i++) + result->gidlist.gidlist_val[i] = + groups[i]; + } else { + result->gid = 65534; + result->gidlist.gidlist_len = 0; + result->gidlist.gidlist_val = NULL; + gssd_verbose_out("gssd_pname_to_uid: mapped" + " to uid=%d, but no groups\n", + (int)result->uid); + } + } + } + return (TRUE); +} +#endif /* !MK_MITKRB5 */ bool_t gssd_delete_sec_context_1_svc(delete_sec_context_args *argp, delete_sec_context_res *result, struct svc_req *rqstp) { gss_ctx_id_t ctx = gssd_find_resource(argp->ctx); if (ctx) { result->major_status = gss_delete_sec_context( &result->minor_status, &ctx, &result->output_token); gssd_delete_resource(argp->ctx); } else { result->major_status = GSS_S_COMPLETE; result->minor_status = 0; } gssd_verbose_out("gssd_delete_sec_context: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_export_sec_context_1_svc(export_sec_context_args *argp, export_sec_context_res *result, struct svc_req *rqstp) { gss_ctx_id_t ctx = gssd_find_resource(argp->ctx); if (ctx) { result->major_status = gss_export_sec_context( &result->minor_status, &ctx, &result->interprocess_token); result->format = KGSS_HEIMDAL_1_1; gssd_delete_resource(argp->ctx); } else { result->major_status = GSS_S_FAILURE; result->minor_status = 0; result->interprocess_token.length = 0; result->interprocess_token.value = NULL; } gssd_verbose_out("gssd_export_sec_context: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_import_name_1_svc(import_name_args *argp, import_name_res *result, struct svc_req *rqstp) { gss_name_t name; result->major_status = gss_import_name(&result->minor_status, &argp->input_name_buffer, argp->input_name_type, &name); gssd_verbose_out("gssd_import_name: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status == GSS_S_COMPLETE) result->output_name = gssd_make_resource(name); else result->output_name = 0; return (TRUE); } /* * If the name is a numeric IP host address, do a DNS lookup on it and * return the DNS name in a malloc'd string. 
*/ static char * gssd_conv_ip_to_dns(int len, char *name) { struct sockaddr_in sin; struct sockaddr_in6 sin6; char *retcp; retcp = NULL; if (len > 0) { retcp = mem_alloc(NI_MAXHOST); memcpy(retcp, name, len); retcp[len] = '\0'; if (inet_pton(AF_INET, retcp, &sin.sin_addr) != 0) { sin.sin_family = AF_INET; sin.sin_len = sizeof(sin); sin.sin_port = 0; if (getnameinfo((struct sockaddr *)&sin, sizeof(sin), retcp, NI_MAXHOST, NULL, 0, NI_NAMEREQD) != 0) { mem_free(retcp, NI_MAXHOST); return (NULL); } } else if (inet_pton(AF_INET6, retcp, &sin6.sin6_addr) != 0) { sin6.sin6_family = AF_INET6; sin6.sin6_len = sizeof(sin6); sin6.sin6_port = 0; if (getnameinfo((struct sockaddr *)&sin6, sizeof(sin6), retcp, NI_MAXHOST, NULL, 0, NI_NAMEREQD) != 0) { mem_free(retcp, NI_MAXHOST); return (NULL); } } else { mem_free(retcp, NI_MAXHOST); return (NULL); } gssd_verbose_out("gssd_conv_ip_to_dns: %s\n", retcp); } return (retcp); } bool_t gssd_canonicalize_name_1_svc(canonicalize_name_args *argp, canonicalize_name_res *result, struct svc_req *rqstp) { gss_name_t name = gssd_find_resource(argp->input_name); gss_name_t output_name; memset(result, 0, sizeof(*result)); if (!name) { result->major_status = GSS_S_BAD_NAME; return (TRUE); } result->major_status = gss_canonicalize_name(&result->minor_status, name, argp->mech_type, &output_name); gssd_verbose_out("gssd_canonicalize_name: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status == GSS_S_COMPLETE) result->output_name = gssd_make_resource(output_name); else result->output_name = 0; return (TRUE); } bool_t gssd_export_name_1_svc(export_name_args *argp, export_name_res *result, struct svc_req *rqstp) { gss_name_t name = gssd_find_resource(argp->input_name); memset(result, 0, sizeof(*result)); if (!name) { result->major_status = GSS_S_BAD_NAME; gssd_verbose_out("gssd_export_name: name resource not found\n"); return (TRUE); } result->major_status = gss_export_name(&result->minor_status, name, &result->exported_name); gssd_verbose_out("gssd_export_name: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_release_name_1_svc(release_name_args *argp, release_name_res *result, struct svc_req *rqstp) { gss_name_t name = gssd_find_resource(argp->input_name); if (name) { result->major_status = gss_release_name(&result->minor_status, &name); gssd_delete_resource(argp->input_name); } else { result->major_status = GSS_S_COMPLETE; result->minor_status = 0; } gssd_verbose_out("gssd_release_name: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_pname_to_uid_1_svc(pname_to_uid_args *argp, pname_to_uid_res *result, struct svc_req *rqstp) { gss_name_t name = gssd_find_resource(argp->pname); uid_t uid; char buf[1024], *bufp; struct passwd pwd, *pw; size_t buflen; int error; static size_t buflen_hint = 1024; memset(result, 0, sizeof(*result)); if (name) { result->major_status = gss_pname_to_uid(&result->minor_status, name, argp->mech, &uid); if (result->major_status == GSS_S_COMPLETE) { result->uid = uid; buflen = buflen_hint; for (;;) { pw = NULL; bufp = buf; if (buflen > sizeof(buf)) bufp = malloc(buflen); if (bufp == NULL) break; error = getpwuid_r(uid, &pwd, bufp, buflen, &pw); if (error != ERANGE) break; if (buflen > sizeof(buf)) free(bufp); buflen += 1024; if (buflen > buflen_hint) buflen_hint = buflen; } if (pw) { int len = NGROUPS; int groups[NGROUPS]; result->gid = 
pw->pw_gid; getgrouplist(pw->pw_name, pw->pw_gid, groups, &len); result->gidlist.gidlist_len = len; result->gidlist.gidlist_val = mem_alloc(len * sizeof(int)); memcpy(result->gidlist.gidlist_val, groups, len * sizeof(int)); gssd_verbose_out("gssd_pname_to_uid: mapped" " to uid=%d, gid=%d\n", (int)result->uid, (int)result->gid); } else { result->gid = 65534; result->gidlist.gidlist_len = 0; result->gidlist.gidlist_val = NULL; gssd_verbose_out("gssd_pname_to_uid: mapped" " to uid=%d, but no groups\n", (int)result->uid); } if (bufp != NULL && buflen > sizeof(buf)) free(bufp); } else gssd_verbose_out("gssd_pname_to_uid: failed major=0x%x" " minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); } else { result->major_status = GSS_S_BAD_NAME; result->minor_status = 0; gssd_verbose_out("gssd_pname_to_uid: no name\n"); } return (TRUE); } bool_t gssd_acquire_cred_1_svc(acquire_cred_args *argp, acquire_cred_res *result, struct svc_req *rqstp) { gss_name_t desired_name = GSS_C_NO_NAME; gss_cred_id_t cred; char ccname[PATH_MAX + 5 + 1], *cp, *cp2; int gotone; -#ifndef WITHOUT_KERBEROS gss_buffer_desc namebuf; uint32_t minstat; krb5_error_code kret; -#endif memset(result, 0, sizeof(*result)); if (argp->desired_name) { desired_name = gssd_find_resource(argp->desired_name); if (!desired_name) { result->major_status = GSS_S_BAD_NAME; gssd_verbose_out("gssd_acquire_cred: no desired name" " found\n"); return (TRUE); } } -#ifndef WITHOUT_KERBEROS if (hostbased_initiator_cred != 0 && argp->desired_name != 0 && argp->uid == 0 && argp->cred_usage == GSS_C_INITIATE) { /* This is a host based initiator name in the keytab file. */ snprintf(ccname, sizeof(ccname), "FILE:%s", GSSD_CREDENTIAL_CACHE_FILE); setenv("KRB5CCNAME", ccname, TRUE); result->major_status = gss_display_name(&result->minor_status, desired_name, &namebuf, NULL); gssd_verbose_out("gssd_acquire_cred: desired name for host " "based initiator cred major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status != GSS_S_COMPLETE) return (TRUE); if (namebuf.length > PATH_MAX + 5) { result->minor_status = 0; result->major_status = GSS_S_FAILURE; return (TRUE); } memcpy(ccname, namebuf.value, namebuf.length); ccname[namebuf.length] = '\0'; if ((cp = strchr(ccname, '@')) != NULL) *cp = '/'; kret = gssd_get_cc_from_keytab(ccname); gssd_verbose_out("gssd_acquire_cred: using keytab entry for " "%s, kerberos ret=%d\n", ccname, (int)kret); gss_release_buffer(&minstat, &namebuf); if (kret != 0) { result->minor_status = kret; result->major_status = GSS_S_FAILURE; return (TRUE); } - } else -#endif /* !WITHOUT_KERBEROS */ - if (ccfile_dirlist[0] != '\0' && argp->desired_name == 0) { + } else if (ccfile_dirlist[0] != '\0' && argp->desired_name == 0) { /* * For the "-s" case and no name provided as an * argument, search the directory list for an appropriate * credential cache file. If the search fails, return failure. */ gotone = 0; cp = ccfile_dirlist; do { cp2 = strchr(cp, ':'); if (cp2 != NULL) *cp2 = '\0'; gotone = find_ccache_file(cp, argp->uid, ccname); if (gotone != 0) break; if (cp2 != NULL) *cp2++ = ':'; cp = cp2; } while (cp != NULL && *cp != '\0'); if (gotone == 0) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_acquire_cred: no cred cache" " file found\n"); return (TRUE); } setenv("KRB5CCNAME", ccname, TRUE); } else { /* * If there wasn't a "-s" option or the name has * been provided as an argument, do it the old way. 
* When a name is provided, it will normally exist in the * default keytab file and the uid will be root. */ if (argp->desired_name != 0 && argp->uid != 0) { if (debug_level == 0) syslog(LOG_ERR, "gss_acquire_cred:" " principal_name for non-root"); else fprintf(stderr, "gss_acquire_cred:" " principal_name for non-root\n"); } snprintf(ccname, sizeof(ccname), "FILE:/tmp/krb5cc_%d", (int) argp->uid); setenv("KRB5CCNAME", ccname, TRUE); } result->major_status = gss_acquire_cred(&result->minor_status, desired_name, argp->time_req, argp->desired_mechs, argp->cred_usage, &cred, &result->actual_mechs, &result->time_rec); gssd_verbose_out("gssd_acquire_cred: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status == GSS_S_COMPLETE) result->output_cred = gssd_make_resource(cred); else result->output_cred = 0; return (TRUE); } bool_t gssd_set_cred_option_1_svc(set_cred_option_args *argp, set_cred_option_res *result, struct svc_req *rqstp) { gss_cred_id_t cred = gssd_find_resource(argp->cred); memset(result, 0, sizeof(*result)); if (!cred) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_set_cred: no credentials\n"); return (TRUE); } result->major_status = gss_set_cred_option(&result->minor_status, &cred, argp->option_name, &argp->option_value); gssd_verbose_out("gssd_set_cred: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_release_cred_1_svc(release_cred_args *argp, release_cred_res *result, struct svc_req *rqstp) { gss_cred_id_t cred = gssd_find_resource(argp->cred); if (cred) { result->major_status = gss_release_cred(&result->minor_status, &cred); gssd_delete_resource(argp->cred); } else { result->major_status = GSS_S_COMPLETE; result->minor_status = 0; } gssd_verbose_out("gssd_release_cred: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_display_status_1_svc(display_status_args *argp, display_status_res *result, struct svc_req *rqstp) { result->message_context = argp->message_context; result->major_status = gss_display_status(&result->minor_status, argp->status_value, argp->status_type, argp->mech_type, &result->message_context, &result->status_string); gssd_verbose_out("gssd_display_status: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_ip_to_dns_1_svc(ip_to_dns_args *argp, ip_to_dns_res *result, struct svc_req *rqstp) { char *host; memset(result, 0, sizeof(*result)); /* Check to see if the name is actually an IP address. */ host = gssd_conv_ip_to_dns(argp->ip_addr.ip_addr_len, argp->ip_addr.ip_addr_val); if (host != NULL) { result->major_status = GSS_S_COMPLETE; result->dns_name.dns_name_len = strlen(host); result->dns_name.dns_name_val = host; return (TRUE); } result->major_status = GSS_S_FAILURE; return (TRUE); } int gssd_1_freeresult(SVCXPRT *transp, xdrproc_t xdr_result, caddr_t result) { /* * We don't use XDR to free the results - anything which was * allocated came from GSS-API. We use xdr_result to figure * out what to do. 
*/ OM_uint32 junk; if (xdr_result == (xdrproc_t) xdr_init_sec_context_res) { init_sec_context_res *p = (init_sec_context_res *) result; gss_release_buffer(&junk, &p->output_token); } else if (xdr_result == (xdrproc_t) xdr_accept_sec_context_res) { accept_sec_context_res *p = (accept_sec_context_res *) result; gss_release_buffer(&junk, &p->output_token); } else if (xdr_result == (xdrproc_t) xdr_delete_sec_context_res) { delete_sec_context_res *p = (delete_sec_context_res *) result; gss_release_buffer(&junk, &p->output_token); } else if (xdr_result == (xdrproc_t) xdr_export_sec_context_res) { export_sec_context_res *p = (export_sec_context_res *) result; if (p->interprocess_token.length) memset(p->interprocess_token.value, 0, p->interprocess_token.length); gss_release_buffer(&junk, &p->interprocess_token); } else if (xdr_result == (xdrproc_t) xdr_export_name_res) { export_name_res *p = (export_name_res *) result; gss_release_buffer(&junk, &p->exported_name); } else if (xdr_result == (xdrproc_t) xdr_acquire_cred_res) { acquire_cred_res *p = (acquire_cred_res *) result; gss_release_oid_set(&junk, &p->actual_mechs); } else if (xdr_result == (xdrproc_t) xdr_pname_to_uid_res) { pname_to_uid_res *p = (pname_to_uid_res *) result; if (p->gidlist.gidlist_val) free(p->gidlist.gidlist_val); } else if (xdr_result == (xdrproc_t) xdr_display_status_res) { display_status_res *p = (display_status_res *) result; gss_release_buffer(&junk, &p->status_string); } return (TRUE); } /* * Search a directory for the most likely candidate to be used as the * credential cache for a uid. If successful, return 1 and fill the * file's path id into "rpath". Otherwise, return 0. */ static int find_ccache_file(const char *dirpath, uid_t uid, char *rpath) { DIR *dirp; struct dirent *dp; struct stat sb; time_t exptime, oexptime; int gotone, len, rating, orating; char namepath[PATH_MAX + 5 + 1]; char retpath[PATH_MAX + 5 + 1]; dirp = opendir(dirpath); if (dirp == NULL) return (0); gotone = 0; orating = 0; oexptime = 0; while ((dp = readdir(dirp)) != NULL) { len = snprintf(namepath, sizeof(namepath), "%s/%s", dirpath, dp->d_name); if (len < sizeof(namepath) && (hostbased_initiator_cred == 0 || strcmp(namepath, GSSD_CREDENTIAL_CACHE_FILE) != 0) && strstr(dp->d_name, ccfile_substring) != NULL && lstat(namepath, &sb) >= 0 && sb.st_uid == uid && S_ISREG(sb.st_mode)) { len = snprintf(namepath, sizeof(namepath), "FILE:%s/%s", dirpath, dp->d_name); if (len < sizeof(namepath) && is_a_valid_tgt_cache(namepath, uid, &rating, &exptime) != 0) { if (gotone == 0 || rating > orating || (rating == orating && exptime > oexptime)) { orating = rating; oexptime = exptime; strcpy(retpath, namepath); gotone = 1; } } } } closedir(dirp); if (gotone != 0) { strcpy(rpath, retpath); return (1); } return (0); } /* * Try to determine if the file is a valid tgt cache file. * Check that the file has a valid tgt for a principal. * If it does, return 1, otherwise return 0. * It also returns a "rating" and the expiry time for the TGT, when found. * This "rating" is higher based on heuristics that make it more * likely to be the correct credential cache file to use. It can * be used by the caller, along with expiry time, to select from * multiple credential cache files. 
*/ static int is_a_valid_tgt_cache(const char *filepath, uid_t uid, int *retrating, time_t *retexptime) { -#ifndef WITHOUT_KERBEROS krb5_context context; krb5_principal princ; krb5_ccache ccache; krb5_error_code retval; krb5_cc_cursor curse; krb5_creds krbcred; int gotone, orating, rating, ret; struct passwd *pw; char *cp, *cp2, *pname; time_t exptime; /* Find a likely name for the uid principal. */ pw = getpwuid(uid); /* * Do a bunch of krb5 library stuff to try and determine if * this file is a credentials cache with an appropriate TGT * in it. */ retval = krb5_init_context(&context); if (retval != 0) return (0); retval = krb5_cc_resolve(context, filepath, &ccache); if (retval != 0) { krb5_free_context(context); return (0); } ret = 0; orating = 0; exptime = 0; retval = krb5_cc_start_seq_get(context, ccache, &curse); if (retval == 0) { while ((retval = krb5_cc_next_cred(context, ccache, &curse, &krbcred)) == 0) { gotone = 0; rating = 0; retval = krb5_unparse_name(context, krbcred.server, &pname); if (retval == 0) { cp = strchr(pname, '/'); if (cp != NULL) { *cp++ = '\0'; if (strcmp(pname, "krbtgt") == 0 && krbcred.times.endtime > time(NULL) ) { gotone = 1; /* * Test to see if this is a * tgt for cross-realm auth. * Rate it higher, if it is not. */ cp2 = strchr(cp, '@'); if (cp2 != NULL) { *cp2++ = '\0'; if (strcmp(cp, cp2) == 0) rating++; } } } free(pname); } if (gotone != 0) { retval = krb5_unparse_name(context, krbcred.client, &pname); if (retval == 0) { cp = strchr(pname, '@'); if (cp != NULL) { *cp++ = '\0'; if (pw != NULL && strcmp(pname, pw->pw_name) == 0) rating++; if (strchr(pname, '/') == NULL) rating++; if (pref_realm[0] != '\0' && strcmp(cp, pref_realm) == 0) rating++; } } free(pname); if (rating > orating) { orating = rating; exptime = krbcred.times.endtime; } else if (rating == orating && krbcred.times.endtime > exptime) exptime = krbcred.times.endtime; ret = 1; } krb5_free_cred_contents(context, &krbcred); } krb5_cc_end_seq_get(context, ccache, &curse); } krb5_cc_close(context, ccache); krb5_free_context(context); if (ret != 0) { *retrating = orating; *retexptime = exptime; } return (ret); -#else /* WITHOUT_KERBEROS */ - return (0); -#endif /* !WITHOUT_KERBEROS */ } -#ifndef WITHOUT_KERBEROS /* * This function attempts to do essentially a "kinit -k" for the principal * name provided as the argument, so that there will be a TGT in the * credential cache. 
*/ static krb5_error_code gssd_get_cc_from_keytab(const char *name) { krb5_error_code ret, opt_ret, princ_ret, cc_ret, kt_ret, cred_ret; krb5_context context; krb5_principal principal; krb5_keytab kt; krb5_creds cred; krb5_get_init_creds_opt *opt; krb5_deltat start_time = 0; krb5_ccache ccache; ret = krb5_init_context(&context); if (ret != 0) return (ret); opt_ret = cc_ret = kt_ret = cred_ret = 1; /* anything non-zero */ princ_ret = ret = krb5_parse_name(context, name, &principal); if (ret == 0) opt_ret = ret = krb5_get_init_creds_opt_alloc(context, &opt); if (ret == 0) cc_ret = ret = krb5_cc_default(context, &ccache); if (ret == 0) ret = krb5_cc_initialize(context, ccache, principal); if (ret == 0) { #ifndef MK_MITKRB5 /* For Heimdal only */ krb5_get_init_creds_opt_set_default_flags(context, "gssd", krb5_principal_get_realm(context, principal), opt); #endif kt_ret = ret = krb5_kt_default(context, &kt); } if (ret == 0) cred_ret = ret = krb5_get_init_creds_keytab(context, &cred, principal, kt, start_time, NULL, opt); if (ret == 0) ret = krb5_cc_store_cred(context, ccache, &cred); if (kt_ret == 0) krb5_kt_close(context, kt); if (cc_ret == 0) krb5_cc_close(context, ccache); if (opt_ret == 0) krb5_get_init_creds_opt_free(context, opt); if (princ_ret == 0) krb5_free_principal(context, principal); if (cred_ret == 0) krb5_free_cred_contents(context, &cred); krb5_free_context(context); return (ret); } /* * Acquire a gss credential for a uid. */ static OM_uint32 gssd_get_user_cred(OM_uint32 *min_statp, uid_t uid, gss_cred_id_t *credp) { gss_buffer_desc principal_desc; gss_name_t name; OM_uint32 maj_stat, min_stat; gss_OID_set mechlist; struct passwd *pw; pw = getpwuid(uid); if (pw == NULL) { *min_statp = 0; return (GSS_S_FAILURE); } /* * The mechanism must be set to KerberosV for acquisition * of credentials to work reliably. */ maj_stat = gss_create_empty_oid_set(min_statp, &mechlist); if (maj_stat != GSS_S_COMPLETE) return (maj_stat); maj_stat = gss_add_oid_set_member(min_statp, GSS_KRB5_MECH_OID_X, &mechlist); if (maj_stat != GSS_S_COMPLETE) { gss_release_oid_set(&min_stat, &mechlist); return (maj_stat); } principal_desc.value = (void *)pw->pw_name; principal_desc.length = strlen(pw->pw_name); maj_stat = gss_import_name(min_statp, &principal_desc, GSS_C_NT_USER_NAME, &name); if (maj_stat != GSS_S_COMPLETE) { gss_release_oid_set(&min_stat, &mechlist); return (maj_stat); } /* Acquire the credentials. */ maj_stat = gss_acquire_cred(min_statp, name, 0, mechlist, GSS_C_INITIATE, credp, NULL, NULL); gss_release_name(&min_stat, &name); gss_release_oid_set(&min_stat, &mechlist); return (maj_stat); } -#endif /* !WITHOUT_KERBEROS */ void gssd_terminate(int sig __unused) { -#ifndef WITHOUT_KERBEROS if (hostbased_initiator_cred != 0) unlink(GSSD_CREDENTIAL_CACHE_FILE); -#endif exit(0); }
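The replay detection in svc_rpc_gss_check_replay() and svc_rpc_gss_update_seq() shown above keeps a fixed-size sliding bitmask keyed off the highest sequence number seen so far. The following is a minimal user-space sketch of that bookkeeping, not part of this change: SEQWINDOW, seq_check() and seq_update() are illustrative stand-ins, the per-client locking is omitted, and the word-shift loop is arranged so no shift count ever reaches 32.

/*
 * Illustrative sketch only: the RPCSEC_GSS replay window reduced to a
 * self-contained demo.  Bit i of seqmask represents sequence number
 * (seqlast - i); anything older than SEQWINDOW, or already marked,
 * is treated as a replay.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQWINDOW	128		/* hypothetical window size */

static uint32_t seqlast;		/* highest sequence number seen */
static uint32_t seqmask[SEQWINDOW / 32];

/* Return true if seq has not been seen and is not too old to accept. */
static bool
seq_check(uint32_t seq)
{
	uint32_t off;

	if (seq > seqlast)
		return (true);
	off = seqlast - seq;
	if (off >= SEQWINDOW)
		return (false);		/* outside the window: drop */
	return ((seqmask[off / 32] & (1u << (off % 32))) == 0);
}

/* Record seq, sliding the window forward when seq advances seqlast. */
static void
seq_update(uint32_t seq)
{
	uint32_t off, carry, newcarry;
	int i;

	if (seq > seqlast) {
		off = seq - seqlast;
		while (off >= 32) {	/* shift whole 32-bit words */
			for (i = SEQWINDOW / 32 - 1; i > 0; i--)
				seqmask[i] = seqmask[i - 1];
			seqmask[0] = 0;
			off -= 32;
		}
		if (off != 0) {		/* shift the remaining bits */
			carry = 0;
			for (i = 0; i < SEQWINDOW / 32; i++) {
				newcarry = seqmask[i] >> (32 - off);
				seqmask[i] = (seqmask[i] << off) | carry;
				carry = newcarry;
			}
		}
		seqmask[0] |= 1;	/* bit 0 is the new seqlast */
		seqlast = seq;
	} else {
		off = seqlast - seq;
		seqmask[off / 32] |= (1u << (off % 32));
	}
}

int
main(void)
{
	uint32_t tests[] = { 5, 5, 4, 200, 100, 73, 5 };
	unsigned i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		bool fresh = seq_check(tests[i]);

		printf("seq %u: %s\n", (unsigned)tests[i],
		    fresh ? "accept" : "drop");
		if (fresh)
			seq_update(tests[i]);
	}
	return (0);
}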
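gssd_load_mech() above parses _PATH_GSS_MECH ("/etc/gss/mech") as lines of four whitespace-separated fields: mechanism name, OID, userland library and kernel module, where a leading '#' marks a comment line and '-' in the last field means there is no kernel module to find or load. A hypothetical entry in that format might look like the line below; the library path and module name are illustrative assumptions rather than values taken from this change.

# name		oid			library				kernel module
kerberosv5	1.2.840.113554.1.2.2	/usr/lib/libgssapi_krb5.so	kgssapi_krb5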