diff --git a/sys/rpc/auth.h b/sys/rpc/auth.h index fd56b33da52e..5444f6180c5e 100644 --- a/sys/rpc/auth.h +++ b/sys/rpc/auth.h @@ -1,375 +1,375 @@ /* $NetBSD: auth.h,v 1.15 2000/06/02 22:57:55 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * from: @(#)auth.h 1.17 88/02/08 SMI * from: @(#)auth.h 2.3 88/08/07 4.0 RPCSRC * from: @(#)auth.h 1.43 98/02/02 SMI * $FreeBSD$ */ /* * auth.h, Authentication interface. * * Copyright (C) 1984, Sun Microsystems, Inc. * * The data structures are completely opaque to the client. The client * is required to pass an AUTH * to routines that create rpc * "sessions". */ #ifndef _RPC_AUTH_H #define _RPC_AUTH_H #include #include #include #include #define MAX_AUTH_BYTES 400 #define MAXNETNAMELEN 255 /* maximum length of network user's name */ /* * Client side authentication/security data */ typedef struct sec_data { u_int secmod; /* security mode number e.g. in nfssec.conf */ u_int rpcflavor; /* rpc flavors:AUTH_UNIX,AUTH_DES,RPCSEC_GSS */ int flags; /* AUTH_F_xxx flags */ caddr_t data; /* opaque data per flavor */ } sec_data_t; #ifdef _SYSCALL32_IMPL struct sec_data32 { uint32_t secmod; /* security mode number e.g. in nfssec.conf */ uint32_t rpcflavor; /* rpc flavors:AUTH_UNIX,AUTH_DES,RPCSEC_GSS */ int32_t flags; /* AUTH_F_xxx flags */ caddr32_t data; /* opaque data per flavor */ }; #endif /* _SYSCALL32_IMPL */ /* * AUTH_DES flavor specific data from sec_data opaque data field. * AUTH_KERB has the same structure. */ typedef struct des_clnt_data { struct netbuf syncaddr; /* time sync addr */ struct knetconfig *knconf; /* knetconfig info that associated */ /* with the syncaddr. */ char *netname; /* server's netname */ int netnamelen; /* server's netname len */ } dh_k4_clntdata_t; #ifdef _SYSCALL32_IMPL struct des_clnt_data32 { struct netbuf32 syncaddr; /* time sync addr */ caddr32_t knconf; /* knetconfig info that associated */ /* with the syncaddr. 
*/ caddr32_t netname; /* server's netname */ int32_t netnamelen; /* server's netname len */ }; #endif /* _SYSCALL32_IMPL */ #ifdef KERBEROS /* * flavor specific data to hold the data for AUTH_DES/AUTH_KERB(v4) * in sec_data->data opaque field. */ typedef struct krb4_svc_data { int window; /* window option value */ } krb4_svcdata_t; typedef struct krb4_svc_data des_svcdata_t; #endif /* KERBEROS */ /* * authentication/security specific flags */ #define AUTH_F_RPCTIMESYNC 0x001 /* use RPC to do time sync */ #define AUTH_F_TRYNONE 0x002 /* allow fall back to AUTH_NONE */ /* * Status returned from authentication check */ enum auth_stat { AUTH_OK=0, /* * failed at remote end */ AUTH_BADCRED=1, /* bogus credentials (seal broken) */ AUTH_REJECTEDCRED=2, /* client should begin new session */ AUTH_BADVERF=3, /* bogus verifier (seal broken) */ AUTH_REJECTEDVERF=4, /* verifier expired or was replayed */ AUTH_TOOWEAK=5, /* rejected due to security reasons */ /* * failed locally */ AUTH_INVALIDRESP=6, /* bogus response verifier */ AUTH_FAILED=7, /* some unknown reason */ #ifdef KERBEROS /* * kerberos errors */ , AUTH_KERB_GENERIC = 8, /* kerberos generic error */ AUTH_TIMEEXPIRE = 9, /* time of credential expired */ AUTH_TKT_FILE = 10, /* something wrong with ticket file */ AUTH_DECODE = 11, /* can't decode authenticator */ AUTH_NET_ADDR = 12, /* wrong net address in ticket */ #endif /* KERBEROS */ /* * RPCSEC_GSS errors */ RPCSEC_GSS_CREDPROBLEM = 13, RPCSEC_GSS_CTXPROBLEM = 14, /* * RPC-over-TLS errors */ AUTH_NEEDS_TLS = 15, AUTH_NEEDS_TLS_MUTUAL_HOST = 16, /* Also used by RPCSEC_TLS for the same purpose */ RPCSEC_GSS_NODISPATCH = 0x8000000 }; union des_block { struct { uint32_t high; uint32_t low; } key; char c[8]; }; typedef union des_block des_block; __BEGIN_DECLS extern bool_t xdr_des_block(XDR *, des_block *); __END_DECLS /* * Authentication info. Opaque to client. */ struct opaque_auth { enum_t oa_flavor; /* flavor of auth */ caddr_t oa_base; /* address of more auth stuff */ u_int oa_length; /* not to exceed MAX_AUTH_BYTES */ }; /* * Auth handle, interface to client side authenticators. */ struct rpc_err; typedef struct __auth { struct opaque_auth ah_cred; struct opaque_auth ah_verf; union des_block ah_key; - struct auth_ops { + const struct auth_ops { void (*ah_nextverf) (struct __auth *); /* nextverf & serialize */ int (*ah_marshal) (struct __auth *, uint32_t, XDR *, struct mbuf *); /* validate verifier */ int (*ah_validate) (struct __auth *, uint32_t, struct opaque_auth *, struct mbuf **); /* refresh credentials */ int (*ah_refresh) (struct __auth *, void *); /* destroy this structure */ void (*ah_destroy) (struct __auth *); } *ah_ops; void *ah_private; } AUTH; /* * Authentication ops. * The ops and the auth handle provide the interface to the authenticators. * * AUTH *auth; * XDR *xdrs; * struct opaque_auth verf; */ #define AUTH_NEXTVERF(auth) \ ((*((auth)->ah_ops->ah_nextverf))(auth)) #define AUTH_MARSHALL(auth, xid, xdrs, args) \ ((*((auth)->ah_ops->ah_marshal))(auth, xid, xdrs, args)) #define AUTH_VALIDATE(auth, xid, verfp, resultsp) \ ((*((auth)->ah_ops->ah_validate))((auth), xid, verfp, resultsp)) #define AUTH_REFRESH(auth, msg) \ ((*((auth)->ah_ops->ah_refresh))(auth, msg)) #define AUTH_DESTROY(auth) \ ((*((auth)->ah_ops->ah_destroy))(auth)) __BEGIN_DECLS extern struct opaque_auth _null_auth; __END_DECLS /* * These are the various implementations of client side authenticators. 
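/*
 * Illustrative sketch (not part of the patch): with this change every
 * client-side authenticator is expected to declare its ops table "const",
 * exactly as auth_none.c and auth_unix.c below now do, while callers keep
 * dispatching through the AUTH_* macros.  The myauth_* names here are
 * hypothetical placeholders.
 */
static void	myauth_nextverf(AUTH *);
static bool_t	myauth_marshal(AUTH *, uint32_t, XDR *, struct mbuf *);
static bool_t	myauth_validate(AUTH *, uint32_t, struct opaque_auth *,
		    struct mbuf **);
static bool_t	myauth_refresh(AUTH *, void *);
static void	myauth_destroy(AUTH *);

static const struct auth_ops myauth_ops = {
	.ah_nextverf	= myauth_nextverf,
	.ah_marshal	= myauth_marshal,
	.ah_validate	= myauth_validate,
	.ah_refresh	= myauth_refresh,
	.ah_destroy	= myauth_destroy,
};

/* A handle wired to this table is then driven through the macros, e.g.: */
/*	AUTH_MARSHALL(auth, xid, &xdrs, args);		*/
/*	AUTH_VALIDATE(auth, xid, &verf, &results);	*/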
*/ /* * System style authentication * AUTH *authunix_create(machname, uid, gid, len, aup_gids) * char *machname; * u_int uid; * u_int gid; * int len; * u_int *aup_gids; */ __BEGIN_DECLS #ifdef _KERNEL struct ucred; extern AUTH *authunix_create(struct ucred *); #else extern AUTH *authunix_create(char *, u_int, u_int, int, u_int *); extern AUTH *authunix_create_default(void); /* takes no parameters */ #endif extern AUTH *authnone_create(void); /* takes no parameters */ extern AUTH *authtls_create(void); /* takes no parameters */ __END_DECLS /* * DES style authentication * AUTH *authsecdes_create(servername, window, timehost, ckey) * char *servername; - network name of server * u_int window; - time to live * const char *timehost; - optional hostname to sync with * des_block *ckey; - optional conversation key to use */ __BEGIN_DECLS extern AUTH *authdes_create (char *, u_int, struct sockaddr *, des_block *); extern AUTH *authdes_seccreate (const char *, const u_int, const char *, const des_block *); __END_DECLS __BEGIN_DECLS extern bool_t xdr_opaque_auth (XDR *, struct opaque_auth *); __END_DECLS #define authsys_create(c,i1,i2,i3,ip) authunix_create((c),(i1),(i2),(i3),(ip)) #define authsys_create_default() authunix_create_default() /* * Netname manipulation routines. */ __BEGIN_DECLS extern int getnetname(char *); extern int host2netname(char *, const char *, const char *); extern int user2netname(char *, const uid_t, const char *); extern int netname2user(char *, uid_t *, gid_t *, int *, gid_t *); extern int netname2host(char *, char *, const int); extern void passwd2des ( char *, char * ); __END_DECLS /* * * These routines interface to the keyserv daemon * */ __BEGIN_DECLS extern int key_decryptsession(const char *, des_block *); extern int key_encryptsession(const char *, des_block *); extern int key_gendes(des_block *); extern int key_setsecret(const char *); extern int key_secretkey_is_set(void); __END_DECLS /* * Publickey routines. */ __BEGIN_DECLS extern int getpublickey (const char *, char *); extern int getpublicandprivatekey (const char *, char *); extern int getsecretkey (char *, char *, char *); __END_DECLS #ifdef KERBEROS /* * Kerberos style authentication * AUTH *authkerb_seccreate(service, srv_inst, realm, window, timehost, status) * const char *service; - service name * const char *srv_inst; - server instance * const char *realm; - server realm * const u_int window; - time to live * const char *timehost; - optional hostname to sync with * int *status; - kerberos status returned */ __BEGIN_DECLS extern AUTH *authkerb_seccreate(const char *, const char *, const char *, const u_int, const char *, int *); __END_DECLS /* * Map a kerberos credential into a unix cred. 
* * authkerb_getucred(rqst, uid, gid, grouplen, groups) * const struct svc_req *rqst; - request pointer * uid_t *uid; * gid_t *gid; * short *grouplen; * int *groups; * */ __BEGIN_DECLS extern int authkerb_getucred(/* struct svc_req *, uid_t *, gid_t *, short *, int * */); __END_DECLS #endif /* KERBEROS */ __BEGIN_DECLS struct svc_req; struct rpc_msg; enum auth_stat _svcauth_null (struct svc_req *, struct rpc_msg *); enum auth_stat _svcauth_short (struct svc_req *, struct rpc_msg *); enum auth_stat _svcauth_unix (struct svc_req *, struct rpc_msg *); enum auth_stat _svcauth_rpcsec_tls (struct svc_req *, struct rpc_msg *); __END_DECLS #define AUTH_NONE 0 /* no authentication */ #define AUTH_NULL 0 /* backward compatibility */ #define AUTH_SYS 1 /* unix style (uid, gids) */ #define AUTH_UNIX AUTH_SYS #define AUTH_SHORT 2 /* short hand unix style */ #define AUTH_DH 3 /* for Diffie-Hellman mechanism */ #define AUTH_DES AUTH_DH /* for backward compatibility */ #define AUTH_KERB 4 /* kerberos style */ #define RPCSEC_GSS 6 /* RPCSEC_GSS */ #define AUTH_TLS 7 /* Initiate RPC-over-TLS */ /* * Pseudo auth flavors for RPCSEC_GSS. */ #define RPCSEC_GSS_KRB5 390003 #define RPCSEC_GSS_KRB5I 390004 #define RPCSEC_GSS_KRB5P 390005 #endif /* !_RPC_AUTH_H */ diff --git a/sys/rpc/auth_none.c b/sys/rpc/auth_none.c index 236b4aa166ae..91597114c069 100644 --- a/sys/rpc/auth_none.c +++ b/sys/rpc/auth_none.c @@ -1,157 +1,157 @@ /* $NetBSD: auth_none.c,v 1.13 2000/01/22 22:19:17 mycroft Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)auth_none.c 1.19 87/08/11 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)auth_none.c 2.1 88/07/29 4.0 RPCSRC"; #endif #include __FBSDID("$FreeBSD$"); /* * auth_none.c * Creates a client authentication handle for passing "null" * credentials and verifiers to remote systems. * * Copyright (C) 1984, Sun Microsystems, Inc. 
*/ #include #include #include #include #include #include #include #include #include #include #define MAX_MARSHAL_SIZE 20 /* * Authenticator operations routines */ static bool_t authnone_marshal (AUTH *, uint32_t, XDR *, struct mbuf *); static void authnone_verf (AUTH *); static bool_t authnone_validate (AUTH *, uint32_t, struct opaque_auth *, struct mbuf **); static bool_t authnone_refresh (AUTH *, void *); static void authnone_destroy (AUTH *); -static struct auth_ops authnone_ops = { +static const struct auth_ops authnone_ops = { .ah_nextverf = authnone_verf, .ah_marshal = authnone_marshal, .ah_validate = authnone_validate, .ah_refresh = authnone_refresh, .ah_destroy = authnone_destroy, }; struct authnone_private { AUTH no_client; char mclient[MAX_MARSHAL_SIZE]; u_int mcnt; }; static struct authnone_private authnone_private; static void authnone_init(void *dummy) { struct authnone_private *ap = &authnone_private; XDR xdrs; ap->no_client.ah_cred = ap->no_client.ah_verf = _null_auth; ap->no_client.ah_ops = &authnone_ops; xdrmem_create(&xdrs, ap->mclient, MAX_MARSHAL_SIZE, XDR_ENCODE); xdr_opaque_auth(&xdrs, &ap->no_client.ah_cred); xdr_opaque_auth(&xdrs, &ap->no_client.ah_verf); ap->mcnt = XDR_GETPOS(&xdrs); XDR_DESTROY(&xdrs); } SYSINIT(authnone_init, SI_SUB_KMEM, SI_ORDER_ANY, authnone_init, NULL); AUTH * authnone_create() { struct authnone_private *ap = &authnone_private; return (&ap->no_client); } /*ARGSUSED*/ static bool_t authnone_marshal(AUTH *client, uint32_t xid, XDR *xdrs, struct mbuf *args) { struct authnone_private *ap = &authnone_private; KASSERT(xdrs != NULL, ("authnone_marshal: xdrs is null")); if (!XDR_PUTBYTES(xdrs, ap->mclient, ap->mcnt)) return (FALSE); xdrmbuf_append(xdrs, args); return (TRUE); } /* All these unused parameters are required to keep ANSI-C from grumbling */ /*ARGSUSED*/ static void authnone_verf(AUTH *client) { } /*ARGSUSED*/ static bool_t authnone_validate(AUTH *client, uint32_t xid, struct opaque_auth *opaque, struct mbuf **mrepp) { return (TRUE); } /*ARGSUSED*/ static bool_t authnone_refresh(AUTH *client, void *dummy) { return (FALSE); } /*ARGSUSED*/ static void authnone_destroy(AUTH *client) { } diff --git a/sys/rpc/auth_unix.c b/sys/rpc/auth_unix.c index 4a3df5f70e3a..be0a241baa36 100644 --- a/sys/rpc/auth_unix.c +++ b/sys/rpc/auth_unix.c @@ -1,380 +1,380 @@ /* $NetBSD: auth_unix.c,v 1.18 2000/07/06 03:03:30 christos Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)auth_unix.c 1.19 87/08/11 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)auth_unix.c 2.2 88/08/01 4.0 RPCSRC"; #endif #include __FBSDID("$FreeBSD$"); /* * auth_unix.c, Implements UNIX style authentication parameters. * * Copyright (C) 1984, Sun Microsystems, Inc. * * The system is very weak. The client uses no encryption for it's * credentials and only sends null verifiers. The server sends backs * null verifiers or optionally a verifier that suggests a new short hand * for the credentials. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* auth_unix.c */ static void authunix_nextverf (AUTH *); static bool_t authunix_marshal (AUTH *, uint32_t, XDR *, struct mbuf *); static bool_t authunix_validate (AUTH *, uint32_t, struct opaque_auth *, struct mbuf **); static bool_t authunix_refresh (AUTH *, void *); static void authunix_destroy (AUTH *); static void marshal_new_auth (AUTH *); -static struct auth_ops authunix_ops = { +static const struct auth_ops authunix_ops = { .ah_nextverf = authunix_nextverf, .ah_marshal = authunix_marshal, .ah_validate = authunix_validate, .ah_refresh = authunix_refresh, .ah_destroy = authunix_destroy, }; /* * This struct is pointed to by the ah_private field of an auth_handle. */ struct audata { TAILQ_ENTRY(audata) au_link; TAILQ_ENTRY(audata) au_alllink; volatile u_int au_refs; struct xucred au_xcred; struct opaque_auth au_origcred; /* original credentials */ struct opaque_auth au_shcred; /* short hand cred */ u_long au_shfaults; /* short hand cache faults */ char au_marshed[MAX_AUTH_BYTES]; u_int au_mpos; /* xdr pos at end of marshed */ AUTH *au_auth; /* link back to AUTH */ }; TAILQ_HEAD(audata_list, audata); #define AUTH_PRIVATE(auth) ((struct audata *)auth->ah_private) #define AUTH_UNIX_HASH_SIZE 16 #define AUTH_UNIX_MAX 256 static struct audata_list auth_unix_cache[AUTH_UNIX_HASH_SIZE]; static struct audata_list auth_unix_all; static struct sx auth_unix_lock; static int auth_unix_count; static void authunix_init(void *dummy) { int i; for (i = 0; i < AUTH_UNIX_HASH_SIZE; i++) TAILQ_INIT(&auth_unix_cache[i]); TAILQ_INIT(&auth_unix_all); sx_init(&auth_unix_lock, "auth_unix_lock"); } SYSINIT(authunix_init, SI_SUB_KMEM, SI_ORDER_ANY, authunix_init, NULL); /* * Create a unix style authenticator. * Returns an auth handle with the given stuff in it. 
*/ AUTH * authunix_create(struct ucred *cred) { uint32_t h, th; struct xucred xcr; char mymem[MAX_AUTH_BYTES]; XDR xdrs; AUTH *auth; struct audata *au, *tau; struct timeval now; uint32_t time; int len; if (auth_unix_count > AUTH_UNIX_MAX) { while (auth_unix_count > AUTH_UNIX_MAX) { sx_xlock(&auth_unix_lock); tau = TAILQ_FIRST(&auth_unix_all); th = HASHSTEP(HASHINIT, tau->au_xcred.cr_uid) % AUTH_UNIX_HASH_SIZE; TAILQ_REMOVE(&auth_unix_cache[th], tau, au_link); TAILQ_REMOVE(&auth_unix_all, tau, au_alllink); auth_unix_count--; sx_xunlock(&auth_unix_lock); AUTH_DESTROY(tau->au_auth); } } /* * Hash the uid to see if we already have an AUTH with this cred. */ h = HASHSTEP(HASHINIT, cred->cr_uid) % AUTH_UNIX_HASH_SIZE; cru2x(cred, &xcr); again: sx_slock(&auth_unix_lock); TAILQ_FOREACH(au, &auth_unix_cache[h], au_link) { if (!memcmp(&xcr, &au->au_xcred, sizeof(xcr))) { refcount_acquire(&au->au_refs); if (sx_try_upgrade(&auth_unix_lock)) { /* * Keep auth_unix_all LRU sorted. */ TAILQ_REMOVE(&auth_unix_all, au, au_alllink); TAILQ_INSERT_TAIL(&auth_unix_all, au, au_alllink); sx_xunlock(&auth_unix_lock); } else { sx_sunlock(&auth_unix_lock); } return (au->au_auth); } } sx_sunlock(&auth_unix_lock); /* * Allocate and set up auth handle */ au = NULL; auth = mem_alloc(sizeof(*auth)); au = mem_alloc(sizeof(*au)); auth->ah_ops = &authunix_ops; auth->ah_private = (caddr_t)au; auth->ah_verf = au->au_shcred = _null_auth; refcount_init(&au->au_refs, 1); au->au_xcred = xcr; au->au_shfaults = 0; au->au_origcred.oa_base = NULL; au->au_auth = auth; getmicrotime(&now); time = now.tv_sec; /* * Serialize the parameters into origcred */ xdrmem_create(&xdrs, mymem, MAX_AUTH_BYTES, XDR_ENCODE); cru2x(cred, &xcr); if (! xdr_authunix_parms(&xdrs, &time, &xcr)) panic("authunix_create: failed to encode creds"); au->au_origcred.oa_length = len = XDR_GETPOS(&xdrs); au->au_origcred.oa_flavor = AUTH_UNIX; au->au_origcred.oa_base = mem_alloc((u_int) len); memcpy(au->au_origcred.oa_base, mymem, (size_t)len); /* * set auth handle to reflect new cred. */ auth->ah_cred = au->au_origcred; marshal_new_auth(auth); sx_xlock(&auth_unix_lock); TAILQ_FOREACH(tau, &auth_unix_cache[h], au_link) { if (!memcmp(&xcr, &tau->au_xcred, sizeof(xcr))) { /* * We lost a race to create the AUTH that * matches this cred. 
*/ sx_xunlock(&auth_unix_lock); AUTH_DESTROY(auth); goto again; } } auth_unix_count++; TAILQ_INSERT_TAIL(&auth_unix_cache[h], au, au_link); TAILQ_INSERT_TAIL(&auth_unix_all, au, au_alllink); refcount_acquire(&au->au_refs); /* one for the cache, one for user */ sx_xunlock(&auth_unix_lock); return (auth); } /* * authunix operations */ /* ARGSUSED */ static void authunix_nextverf(AUTH *auth) { /* no action necessary */ } static bool_t authunix_marshal(AUTH *auth, uint32_t xid, XDR *xdrs, struct mbuf *args) { struct audata *au; au = AUTH_PRIVATE(auth); if (!XDR_PUTBYTES(xdrs, au->au_marshed, au->au_mpos)) return (FALSE); xdrmbuf_append(xdrs, args); return (TRUE); } static bool_t authunix_validate(AUTH *auth, uint32_t xid, struct opaque_auth *verf, struct mbuf **mrepp) { struct audata *au; XDR txdrs; if (!verf) return (TRUE); if (verf->oa_flavor == AUTH_SHORT) { au = AUTH_PRIVATE(auth); xdrmem_create(&txdrs, verf->oa_base, verf->oa_length, XDR_DECODE); if (au->au_shcred.oa_base != NULL) { mem_free(au->au_shcred.oa_base, au->au_shcred.oa_length); au->au_shcred.oa_base = NULL; } if (xdr_opaque_auth(&txdrs, &au->au_shcred)) { auth->ah_cred = au->au_shcred; } else { txdrs.x_op = XDR_FREE; (void)xdr_opaque_auth(&txdrs, &au->au_shcred); au->au_shcred.oa_base = NULL; auth->ah_cred = au->au_origcred; } marshal_new_auth(auth); } return (TRUE); } static bool_t authunix_refresh(AUTH *auth, void *dummy) { struct audata *au = AUTH_PRIVATE(auth); struct xucred xcr; uint32_t time; struct timeval now; XDR xdrs; int stat; if (auth->ah_cred.oa_base == au->au_origcred.oa_base) { /* there is no hope. Punt */ return (FALSE); } au->au_shfaults ++; /* first deserialize the creds back into a struct ucred */ xdrmem_create(&xdrs, au->au_origcred.oa_base, au->au_origcred.oa_length, XDR_DECODE); stat = xdr_authunix_parms(&xdrs, &time, &xcr); if (! stat) goto done; /* update the time and serialize in place */ getmicrotime(&now); time = now.tv_sec; xdrs.x_op = XDR_ENCODE; XDR_SETPOS(&xdrs, 0); stat = xdr_authunix_parms(&xdrs, &time, &xcr); if (! stat) goto done; auth->ah_cred = au->au_origcred; marshal_new_auth(auth); done: XDR_DESTROY(&xdrs); return (stat); } static void authunix_destroy(AUTH *auth) { struct audata *au; au = AUTH_PRIVATE(auth); if (!refcount_release(&au->au_refs)) return; mem_free(au->au_origcred.oa_base, au->au_origcred.oa_length); if (au->au_shcred.oa_base != NULL) mem_free(au->au_shcred.oa_base, au->au_shcred.oa_length); mem_free(auth->ah_private, sizeof(struct audata)); if (auth->ah_verf.oa_base != NULL) mem_free(auth->ah_verf.oa_base, auth->ah_verf.oa_length); mem_free(auth, sizeof(*auth)); } /* * Marshals (pre-serializes) an auth struct. * sets private data, au_marshed and au_mpos */ static void marshal_new_auth(AUTH *auth) { XDR xdr_stream; XDR *xdrs = &xdr_stream; struct audata *au; au = AUTH_PRIVATE(auth); xdrmem_create(xdrs, au->au_marshed, MAX_AUTH_BYTES, XDR_ENCODE); if ((! xdr_opaque_auth(xdrs, &(auth->ah_cred))) || (! xdr_opaque_auth(xdrs, &(auth->ah_verf)))) printf("auth_none.c - Fatal marshalling problem"); else au->au_mpos = XDR_GETPOS(xdrs); XDR_DESTROY(xdrs); } diff --git a/sys/rpc/clnt.h b/sys/rpc/clnt.h index 6f8f898ca918..a968c8f930fc 100644 --- a/sys/rpc/clnt.h +++ b/sys/rpc/clnt.h @@ -1,725 +1,725 @@ /* $NetBSD: clnt.h,v 1.14 2000/06/02 22:57:55 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2010, Oracle America, Inc. * All rights reserved. 
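/*
 * Lifecycle sketch (an assumption drawn from the code above, not from the
 * patch itself): authunix_create() either finds a cached handle for the
 * uid and takes a reference, or inserts a new handle holding two
 * references, one for the cache and one for the caller.  The caller
 * balances its reference with AUTH_DESTROY(); the cached reference is
 * dropped when the LRU list is trimmed.
 */
static void
example_authunix_use(struct ucred *cred)
{
	AUTH *auth;

	auth = authunix_create(cred);	/* caller owns one reference */
	/* ... hang the handle off a CLIENT and issue RPCs ... */
	AUTH_DESTROY(auth);		/* releases the caller's reference */
}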
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the "Oracle America, Inc." nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * from: @(#)clnt.h 1.31 94/04/29 SMI * from: @(#)clnt.h 2.1 88/07/29 4.0 RPCSRC * $FreeBSD$ */ /* * clnt.h - Client side remote procedure call interface. * * Copyright (c) 1986-1991,1994-1999 by Sun Microsystems, Inc. * All rights reserved. */ #ifndef _RPC_CLNT_H_ #define _RPC_CLNT_H_ #include #include #ifdef _KERNEL #include #include #else #include #endif #include /* * Well-known IPV6 RPC broadcast address. */ #define RPCB_MULTICAST_ADDR "ff02::202" /* * the following errors are in general unrecoverable. The caller * should give up rather than retry. */ #define IS_UNRECOVERABLE_RPC(s) (((s) == RPC_AUTHERROR) || \ ((s) == RPC_CANTENCODEARGS) || \ ((s) == RPC_CANTDECODERES) || \ ((s) == RPC_VERSMISMATCH) || \ ((s) == RPC_PROCUNAVAIL) || \ ((s) == RPC_PROGUNAVAIL) || \ ((s) == RPC_PROGVERSMISMATCH) || \ ((s) == RPC_CANTDECODEARGS)) /* * Error info. */ struct rpc_err { enum clnt_stat re_status; union { int RE_errno; /* related system error */ enum auth_stat RE_why; /* why the auth error occurred */ struct { rpcvers_t low; /* lowest version supported */ rpcvers_t high; /* highest version supported */ } RE_vers; struct { /* maybe meaningful if RPC_FAILED */ int32_t s1; int32_t s2; } RE_lb; /* life boot & debugging only */ } ru; #define re_errno ru.RE_errno #define re_why ru.RE_why #define re_vers ru.RE_vers #define re_lb ru.RE_lb }; #ifdef _KERNEL /* * Functions of this type may be used to receive notification when RPC * calls have to be re-transmitted etc. */ typedef void rpc_feedback(int cmd, int procnum, void *); /* * Timers used for the pseudo-transport protocol when using datagrams */ struct rpc_timers { u_short rt_srtt; /* smoothed round-trip time */ u_short rt_deviate; /* estimated deviation */ u_long rt_rtxcur; /* current (backed-off) rto */ }; /* * A structure used with CLNT_CALL_EXT to pass extra information used * while processing an RPC call. */ struct rpc_callextra { AUTH *rc_auth; /* auth handle to use for this call */ rpc_feedback *rc_feedback; /* callback for retransmits etc. 
*/ void *rc_feedback_arg; /* argument for callback */ struct rpc_timers *rc_timers; /* optional RTT timers */ struct rpc_err rc_err; /* detailed call status */ }; #endif /* * Client rpc handle. * Created by individual implementations * Client is responsible for initializing auth, see e.g. auth_none.c. */ typedef struct __rpc_client { #ifdef _KERNEL volatile u_int cl_refs; /* reference count */ AUTH *cl_auth; /* authenticator */ - struct clnt_ops { + const struct clnt_ops { /* call remote procedure */ enum clnt_stat (*cl_call)(struct __rpc_client *, struct rpc_callextra *, rpcproc_t, struct mbuf *, struct mbuf **, struct timeval); /* abort a call */ void (*cl_abort)(struct __rpc_client *); /* get specific error code */ void (*cl_geterr)(struct __rpc_client *, struct rpc_err *); /* frees results */ bool_t (*cl_freeres)(struct __rpc_client *, xdrproc_t, void *); /* close the connection and terminate pending RPCs */ void (*cl_close)(struct __rpc_client *); /* destroy this structure */ void (*cl_destroy)(struct __rpc_client *); /* the ioctl() of rpc */ bool_t (*cl_control)(struct __rpc_client *, u_int, void *); } *cl_ops; #else AUTH *cl_auth; /* authenticator */ struct clnt_ops { /* call remote procedure */ enum clnt_stat (*cl_call)(struct __rpc_client *, rpcproc_t, xdrproc_t, void *, xdrproc_t, void *, struct timeval); /* abort a call */ void (*cl_abort)(struct __rpc_client *); /* get specific error code */ void (*cl_geterr)(struct __rpc_client *, struct rpc_err *); /* frees results */ bool_t (*cl_freeres)(struct __rpc_client *, xdrproc_t, void *); /* destroy this structure */ void (*cl_destroy)(struct __rpc_client *); /* the ioctl() of rpc */ bool_t (*cl_control)(struct __rpc_client *, u_int, void *); } *cl_ops; #endif void *cl_private; /* private stuff */ char *cl_netid; /* network token */ char *cl_tp; /* device name */ } CLIENT; /* * Feedback values used for possible congestion and rate control */ #define FEEDBACK_OK 1 /* no retransmits */ #define FEEDBACK_REXMIT1 2 /* first retransmit */ #define FEEDBACK_REXMIT2 3 /* second and subsequent retransmit */ #define FEEDBACK_RECONNECT 4 /* client reconnect */ /* Used to set version of portmapper used in broadcast */ #define CLCR_SET_LOWVERS 3 #define CLCR_GET_LOWVERS 4 #define RPCSMALLMSGSIZE 400 /* a more reasonable packet size */ /* * client side rpc interface ops * * Parameter types are: * */ #ifdef _KERNEL #define CLNT_ACQUIRE(rh) \ refcount_acquire(&(rh)->cl_refs) #define CLNT_RELEASE(rh) \ if (refcount_release(&(rh)->cl_refs)) \ CLNT_DESTROY(rh) /* * void * CLNT_CLOSE(rh); * CLIENT *rh; */ #define CLNT_CLOSE(rh) ((*(rh)->cl_ops->cl_close)(rh)) enum clnt_stat clnt_call_private(CLIENT *, struct rpc_callextra *, rpcproc_t, xdrproc_t, void *, xdrproc_t, void *, struct timeval); /* * enum clnt_stat * CLNT_CALL_MBUF(rh, ext, proc, mreq, mrepp, timeout) * CLIENT *rh; * struct rpc_callextra *ext; * rpcproc_t proc; * struct mbuf *mreq; * struct mbuf **mrepp; * struct timeval timeout; * * Call arguments in mreq which is consumed by the call (even if there * is an error). Results returned in *mrepp. 
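/*
 * Companion sketch (not part of the patch): kernel transports declare
 * their clnt_ops tables "const" as well, mirroring the auth_ops change;
 * clnt_bck.c below follows this pattern.  The mytrans_* names are
 * hypothetical placeholders.
 */
static enum clnt_stat	mytrans_call(CLIENT *, struct rpc_callextra *,
			    rpcproc_t, struct mbuf *, struct mbuf **,
			    struct timeval);
static void	mytrans_abort(CLIENT *);
static void	mytrans_geterr(CLIENT *, struct rpc_err *);
static bool_t	mytrans_freeres(CLIENT *, xdrproc_t, void *);
static void	mytrans_close(CLIENT *);
static void	mytrans_destroy(CLIENT *);
static bool_t	mytrans_control(CLIENT *, u_int, void *);

static const struct clnt_ops mytrans_ops = {
	.cl_call	= mytrans_call,
	.cl_abort	= mytrans_abort,
	.cl_geterr	= mytrans_geterr,
	.cl_freeres	= mytrans_freeres,
	.cl_close	= mytrans_close,
	.cl_destroy	= mytrans_destroy,
	.cl_control	= mytrans_control,
};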
*/ #define CLNT_CALL_MBUF(rh, ext, proc, mreq, mrepp, secs) \ ((*(rh)->cl_ops->cl_call)(rh, ext, proc, mreq, mrepp, secs)) /* * enum clnt_stat * CLNT_CALL_EXT(rh, ext, proc, xargs, argsp, xres, resp, timeout) * CLIENT *rh; * struct rpc_callextra *ext; * rpcproc_t proc; * xdrproc_t xargs; * void *argsp; * xdrproc_t xres; * void *resp; * struct timeval timeout; */ #define CLNT_CALL_EXT(rh, ext, proc, xargs, argsp, xres, resp, secs) \ clnt_call_private(rh, ext, proc, xargs, \ argsp, xres, resp, secs) #endif /* * enum clnt_stat * CLNT_CALL(rh, proc, xargs, argsp, xres, resp, timeout) * CLIENT *rh; * rpcproc_t proc; * xdrproc_t xargs; * void *argsp; * xdrproc_t xres; * void *resp; * struct timeval timeout; */ #ifdef _KERNEL #define CLNT_CALL(rh, proc, xargs, argsp, xres, resp, secs) \ clnt_call_private(rh, NULL, proc, xargs, \ argsp, xres, resp, secs) #define clnt_call(rh, proc, xargs, argsp, xres, resp, secs) \ clnt_call_private(rh, NULL, proc, xargs, \ argsp, xres, resp, secs) #else #define CLNT_CALL(rh, proc, xargs, argsp, xres, resp, secs) \ ((*(rh)->cl_ops->cl_call)(rh, proc, xargs, \ argsp, xres, resp, secs)) #define clnt_call(rh, proc, xargs, argsp, xres, resp, secs) \ ((*(rh)->cl_ops->cl_call)(rh, proc, xargs, \ argsp, xres, resp, secs)) #endif /* * void * CLNT_ABORT(rh); * CLIENT *rh; */ #define CLNT_ABORT(rh) ((*(rh)->cl_ops->cl_abort)(rh)) #define clnt_abort(rh) ((*(rh)->cl_ops->cl_abort)(rh)) /* * struct rpc_err * CLNT_GETERR(rh); * CLIENT *rh; */ #define CLNT_GETERR(rh,errp) ((*(rh)->cl_ops->cl_geterr)(rh, errp)) #define clnt_geterr(rh,errp) ((*(rh)->cl_ops->cl_geterr)(rh, errp)) /* * bool_t * CLNT_FREERES(rh, xres, resp); * CLIENT *rh; * xdrproc_t xres; * void *resp; */ #define CLNT_FREERES(rh,xres,resp) ((*(rh)->cl_ops->cl_freeres)(rh,xres,resp)) #define clnt_freeres(rh,xres,resp) ((*(rh)->cl_ops->cl_freeres)(rh,xres,resp)) /* * bool_t * CLNT_CONTROL(cl, request, info) * CLIENT *cl; * u_int request; * char *info; */ #define CLNT_CONTROL(cl,rq,in) ((*(cl)->cl_ops->cl_control)(cl,rq,in)) #define clnt_control(cl,rq,in) ((*(cl)->cl_ops->cl_control)(cl,rq,in)) /* * control operations that apply to both udp and tcp transports */ #define CLSET_TIMEOUT 1 /* set timeout (timeval) */ #define CLGET_TIMEOUT 2 /* get timeout (timeval) */ #define CLGET_SERVER_ADDR 3 /* get server's address (sockaddr) */ #define CLGET_FD 6 /* get connections file descriptor */ #define CLGET_SVC_ADDR 7 /* get server's address (netbuf) */ #define CLSET_FD_CLOSE 8 /* close fd while clnt_destroy */ #define CLSET_FD_NCLOSE 9 /* Do not close fd while clnt_destroy */ #define CLGET_XID 10 /* Get xid */ #define CLSET_XID 11 /* Set xid */ #define CLGET_VERS 12 /* Get version number */ #define CLSET_VERS 13 /* Set version number */ #define CLGET_PROG 14 /* Get program number */ #define CLSET_PROG 15 /* Set program number */ #define CLSET_SVC_ADDR 16 /* get server's address (netbuf) */ #define CLSET_PUSH_TIMOD 17 /* push timod if not already present */ #define CLSET_POP_TIMOD 18 /* pop timod */ /* * Connectionless only control operations */ #define CLSET_RETRY_TIMEOUT 4 /* set retry timeout (timeval) */ #define CLGET_RETRY_TIMEOUT 5 /* get retry timeout (timeval) */ #define CLSET_ASYNC 19 #define CLSET_CONNECT 20 /* Use connect() for UDP. (int) */ #ifdef _KERNEL /* * Kernel control operations. The default msleep string is "rpcrecv", * and sleeps are non-interruptible by default. 
*/ #define CLSET_WAITCHAN 21 /* set string to use in msleep call */ #define CLGET_WAITCHAN 22 /* get string used in msleep call */ #define CLSET_INTERRUPTIBLE 23 /* set interruptible flag */ #define CLGET_INTERRUPTIBLE 24 /* set interruptible flag */ #define CLSET_RETRIES 25 /* set retry count for reconnect */ #define CLGET_RETRIES 26 /* get retry count for reconnect */ #define CLSET_PRIVPORT 27 /* set privileged source port flag */ #define CLGET_PRIVPORT 28 /* get privileged source port flag */ #define CLSET_BACKCHANNEL 29 /* set backchannel for socket */ #define CLSET_TLS 30 /* set TLS for socket */ #define CLSET_BLOCKRCV 31 /* Temporarily block reception */ #define CLSET_TLSCERTNAME 32 /* TLS certificate file name */ /* Structure used as the argument for CLSET_RECONUPCALL. */ struct rpc_reconupcall { void (*call)(CLIENT *, void *, struct ucred *); void *arg; }; #define CLSET_RECONUPCALL 33 /* Reconnect upcall */ #endif /* * void * CLNT_DESTROY(rh); * CLIENT *rh; */ #define CLNT_DESTROY(rh) ((*(rh)->cl_ops->cl_destroy)(rh)) #define clnt_destroy(rh) ((*(rh)->cl_ops->cl_destroy)(rh)) /* * RPCTEST is a test program which is accessible on every rpc * transport/port. It is used for testing, performance evaluation, * and network administration. */ #define RPCTEST_PROGRAM ((rpcprog_t)1) #define RPCTEST_VERSION ((rpcvers_t)1) #define RPCTEST_NULL_PROC ((rpcproc_t)2) #define RPCTEST_NULL_BATCH_PROC ((rpcproc_t)3) /* * By convention, procedure 0 takes null arguments and returns them */ #define NULLPROC ((rpcproc_t)0) /* * Below are the client handle creation routines for the various * implementations of client side rpc. They can return NULL if a * creation failure occurs. */ /* * Generic client creation routine. Supported protocols are those that * belong to the nettype namespace (/etc/netconfig). */ __BEGIN_DECLS #ifdef _KERNEL /* * struct socket *so; -- socket * struct sockaddr *svcaddr; -- servers address * rpcprog_t prog; -- program number * rpcvers_t vers; -- version number * size_t sendsz; -- buffer recv size * size_t recvsz; -- buffer send size */ extern CLIENT *clnt_dg_create(struct socket *so, struct sockaddr *svcaddr, rpcprog_t program, rpcvers_t version, size_t sendsz, size_t recvsz); /* * struct socket *so; -- socket * struct sockaddr *svcaddr; -- servers address * rpcprog_t prog; -- program number * rpcvers_t vers; -- version number * size_t sendsz; -- buffer recv size * size_t recvsz; -- buffer send size * int intrflag; -- is it interruptible */ extern CLIENT *clnt_vc_create(struct socket *so, struct sockaddr *svcaddr, rpcprog_t program, rpcvers_t version, size_t sendsz, size_t recvsz, int intrflag); /* * struct netconfig *nconf; -- network type * struct sockaddr *svcaddr; -- servers address * rpcprog_t prog; -- program number * rpcvers_t vers; -- version number * size_t sendsz; -- buffer recv size * size_t recvsz; -- buffer send size */ extern CLIENT *clnt_reconnect_create(struct netconfig *nconf, struct sockaddr *svcaddr, rpcprog_t program, rpcvers_t version, size_t sendsz, size_t recvsz); #else extern CLIENT *clnt_create(const char *, const rpcprog_t, const rpcvers_t, const char *); /* * * const char *hostname; -- hostname * const rpcprog_t prog; -- program number * const rpcvers_t vers; -- version number * const char *nettype; -- network type */ /* * Generic client creation routine. Just like clnt_create(), except * it takes an additional timeout parameter. 
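/*
 * Kernel-side usage sketch (an assumption, not taken from the patch):
 * create a reconnecting client, make its sleeps interruptible, ping the
 * server with NULLPROC and drop the reference.  The program number used
 * here is an arbitrary placeholder.
 */
static enum clnt_stat
example_kernel_null_ping(struct netconfig *nconf, struct sockaddr *addr)
{
	CLIENT *cl;
	struct timeval timo = { .tv_sec = 30, .tv_usec = 0 };
	enum clnt_stat stat;
	int one = 1;

	cl = clnt_reconnect_create(nconf, addr, (rpcprog_t)0x20000099,
	    (rpcvers_t)1, RPCSMALLMSGSIZE, RPCSMALLMSGSIZE);
	if (cl == NULL)
		return (RPC_FAILED);
	CLNT_CONTROL(cl, CLSET_INTERRUPTIBLE, &one);
	stat = CLNT_CALL(cl, NULLPROC, (xdrproc_t)xdr_void, NULL,
	    (xdrproc_t)xdr_void, NULL, timo);
	CLNT_RELEASE(cl);
	return (stat);
}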
*/ extern CLIENT * clnt_create_timed(const char *, const rpcprog_t, const rpcvers_t, const char *, const struct timeval *); /* * * const char *hostname; -- hostname * const rpcprog_t prog; -- program number * const rpcvers_t vers; -- version number * const char *nettype; -- network type * const struct timeval *tp; -- timeout */ /* * Generic client creation routine. Supported protocols are which belong * to the nettype name space. */ extern CLIENT *clnt_create_vers(const char *, const rpcprog_t, rpcvers_t *, const rpcvers_t, const rpcvers_t, const char *); /* * const char *host; -- hostname * const rpcprog_t prog; -- program number * rpcvers_t *vers_out; -- servers highest available version * const rpcvers_t vers_low; -- low version number * const rpcvers_t vers_high; -- high version number * const char *nettype; -- network type */ /* * Generic client creation routine. Supported protocols are which belong * to the nettype name space. */ extern CLIENT * clnt_create_vers_timed(const char *, const rpcprog_t, rpcvers_t *, const rpcvers_t, const rpcvers_t, const char *, const struct timeval *); /* * const char *host; -- hostname * const rpcprog_t prog; -- program number * rpcvers_t *vers_out; -- servers highest available version * const rpcvers_t vers_low; -- low version number * const rpcvers_t vers_high; -- high version number * const char *nettype; -- network type * const struct timeval *tp -- timeout */ /* * Generic client creation routine. It takes a netconfig structure * instead of nettype */ extern CLIENT *clnt_tp_create(const char *, const rpcprog_t, const rpcvers_t, const struct netconfig *); /* * const char *hostname; -- hostname * const rpcprog_t prog; -- program number * const rpcvers_t vers; -- version number * const struct netconfig *netconf; -- network config structure */ /* * Generic client creation routine. Just like clnt_tp_create(), except * it takes an additional timeout parameter. */ extern CLIENT * clnt_tp_create_timed(const char *, const rpcprog_t, const rpcvers_t, const struct netconfig *, const struct timeval *); /* * const char *hostname; -- hostname * const rpcprog_t prog; -- program number * const rpcvers_t vers; -- version number * const struct netconfig *netconf; -- network config structure * const struct timeval *tp -- timeout */ /* * Generic TLI create routine. Only provided for compatibility. */ extern CLIENT *clnt_tli_create(const int, const struct netconfig *, struct netbuf *, const rpcprog_t, const rpcvers_t, const u_int, const u_int); /* * const int fd; -- fd * const struct netconfig *nconf; -- netconfig structure * struct netbuf *svcaddr; -- servers address * const u_long prog; -- program number * const u_long vers; -- version number * const u_int sendsz; -- send size * const u_int recvsz; -- recv size */ /* * Low level clnt create routine for connectionful transports, e.g. tcp. */ extern CLIENT *clnt_vc_create(const int, const struct netbuf *, const rpcprog_t, const rpcvers_t, u_int, u_int); /* * Added for compatibility to old rpc 4.0. Obsoleted by clnt_vc_create(). */ extern CLIENT *clntunix_create(struct sockaddr_un *, u_long, u_long, int *, u_int, u_int); /* * const int fd; -- open file descriptor * const struct netbuf *svcaddr; -- servers address * const rpcprog_t prog; -- program number * const rpcvers_t vers; -- version number * const u_int sendsz; -- buffer recv size * const u_int recvsz; -- buffer send size */ /* * Low level clnt create routine for connectionless transports, e.g. udp. 
*/ extern CLIENT *clnt_dg_create(const int, const struct netbuf *, const rpcprog_t, const rpcvers_t, const u_int, const u_int); /* * const int fd; -- open file descriptor * const struct netbuf *svcaddr; -- servers address * const rpcprog_t program; -- program number * const rpcvers_t version; -- version number * const u_int sendsz; -- buffer recv size * const u_int recvsz; -- buffer send size */ /* * Memory based rpc (for speed check and testing) * CLIENT * * clnt_raw_create(prog, vers) * u_long prog; * u_long vers; */ extern CLIENT *clnt_raw_create(rpcprog_t, rpcvers_t); #endif __END_DECLS /* * Print why creation failed */ __BEGIN_DECLS extern void clnt_pcreateerror(const char *); /* stderr */ extern char *clnt_spcreateerror(const char *); /* string */ __END_DECLS /* * Like clnt_perror(), but is more verbose in its output */ __BEGIN_DECLS extern void clnt_perrno(enum clnt_stat); /* stderr */ extern char *clnt_sperrno(enum clnt_stat); /* string */ __END_DECLS /* * Print an English error message, given the client error code */ __BEGIN_DECLS extern void clnt_perror(CLIENT *, const char *); /* stderr */ extern char *clnt_sperror(CLIENT *, const char *); /* string */ __END_DECLS /* * If a creation fails, the following allows the user to figure out why. */ struct rpc_createerr { enum clnt_stat cf_stat; struct rpc_err cf_error; /* useful when cf_stat == RPC_PMAPFAILURE */ }; #ifdef _KERNEL extern struct rpc_createerr rpc_createerr; #else __BEGIN_DECLS extern struct rpc_createerr *__rpc_createerr(void); __END_DECLS #define rpc_createerr (*(__rpc_createerr())) #endif #ifndef _KERNEL /* * The simplified interface: * enum clnt_stat * rpc_call(host, prognum, versnum, procnum, inproc, in, outproc, out, nettype) * const char *host; * const rpcprog_t prognum; * const rpcvers_t versnum; * const rpcproc_t procnum; * const xdrproc_t inproc, outproc; * const char *in; * char *out; * const char *nettype; */ __BEGIN_DECLS extern enum clnt_stat rpc_call(const char *, const rpcprog_t, const rpcvers_t, const rpcproc_t, const xdrproc_t, const char *, const xdrproc_t, char *, const char *); __END_DECLS /* * RPC broadcast interface * The call is broadcasted to all locally connected nets. * * extern enum clnt_stat * rpc_broadcast(prog, vers, proc, xargs, argsp, xresults, resultsp, * eachresult, nettype) * const rpcprog_t prog; -- program number * const rpcvers_t vers; -- version number * const rpcproc_t proc; -- procedure number * const xdrproc_t xargs; -- xdr routine for args * caddr_t argsp; -- pointer to args * const xdrproc_t xresults; -- xdr routine for results * caddr_t resultsp; -- pointer to results * const resultproc_t eachresult; -- call with each result * const char *nettype; -- Transport type * * For each valid response received, the procedure eachresult is called. * Its form is: * done = eachresult(resp, raddr, nconf) * bool_t done; * caddr_t resp; * struct netbuf *raddr; * struct netconfig *nconf; * where resp points to the results of the call and raddr is the * address if the responder to the broadcast. nconf is the transport * on which the response was received. 
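/*
 * Userland sketch (an assumption, not taken from the patch): the
 * simplified interface documented above collapses a one-shot call into a
 * single function.  "myhost" and the program number are placeholders; a
 * NULLPROC ping needs no real argument or result buffers beyond dummies.
 */
#include <rpc/rpc.h>

enum clnt_stat
example_simple_null_ping(void)
{
	char dummy = 0;

	return (rpc_call("myhost", (rpcprog_t)0x20000099, (rpcvers_t)1,
	    NULLPROC, (xdrproc_t)xdr_void, &dummy,
	    (xdrproc_t)xdr_void, &dummy, "udp"));
}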
* * extern enum clnt_stat * rpc_broadcast_exp(prog, vers, proc, xargs, argsp, xresults, resultsp, * eachresult, inittime, waittime, nettype) * const rpcprog_t prog; -- program number * const rpcvers_t vers; -- version number * const rpcproc_t proc; -- procedure number * const xdrproc_t xargs; -- xdr routine for args * caddr_t argsp; -- pointer to args * const xdrproc_t xresults; -- xdr routine for results * caddr_t resultsp; -- pointer to results * const resultproc_t eachresult; -- call with each result * const int inittime; -- how long to wait initially * const int waittime; -- maximum time to wait * const char *nettype; -- Transport type */ typedef bool_t (*resultproc_t)(caddr_t, ...); __BEGIN_DECLS extern enum clnt_stat rpc_broadcast(const rpcprog_t, const rpcvers_t, const rpcproc_t, const xdrproc_t, caddr_t, const xdrproc_t, caddr_t, const resultproc_t, const char *); extern enum clnt_stat rpc_broadcast_exp(const rpcprog_t, const rpcvers_t, const rpcproc_t, const xdrproc_t, caddr_t, const xdrproc_t, caddr_t, const resultproc_t, const int, const int, const char *); __END_DECLS /* For backward compatibility */ #include #endif #endif /* !_RPC_CLNT_H_ */ diff --git a/sys/rpc/clnt_bck.c b/sys/rpc/clnt_bck.c index 2414171bad37..514905bf1cc2 100644 --- a/sys/rpc/clnt_bck.c +++ b/sys/rpc/clnt_bck.c @@ -1,619 +1,619 @@ /* $NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $ */ /*- * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)clnt_tcp.c 2.2 88/08/01 4.0 RPCSRC"; static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro"; #endif #include __FBSDID("$FreeBSD$"); /* * clnt_tcp.c, Implements a TCP/IP based, client side RPC. * * Copyright (C) 1984, Sun Microsystems, Inc. * * TCP based RPC supports 'batched calls'. * A sequence of calls may be batched-up in a send buffer. The rpc call * return immediately to the client even though the call was not necessarily * sent. 
The batching occurs if the results' xdr routine is NULL (0) AND * the rpc timeout value is zero (see clnt.h, rpc). * * Clients should NOT casually batch calls that in fact return results; that is, * the server side should be aware that a call is batched and not produce any * return message. Batched calls that produce many result messages can * deadlock (netlock) the client and the server.... * * Now go hang yourself. */ /* * This code handles the special case of a NFSv4.n backchannel for * callback RPCs. It is similar to clnt_vc.c, but uses the TCP * connection provided by the client to the server. */ #include "opt_kern_tls.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct cmessage { struct cmsghdr cmsg; struct cmsgcred cmcred; }; static void clnt_bck_geterr(CLIENT *, struct rpc_err *); static bool_t clnt_bck_freeres(CLIENT *, xdrproc_t, void *); static void clnt_bck_abort(CLIENT *); static bool_t clnt_bck_control(CLIENT *, u_int, void *); static void clnt_bck_close(CLIENT *); static void clnt_bck_destroy(CLIENT *); -static struct clnt_ops clnt_bck_ops = { +static const struct clnt_ops clnt_bck_ops = { .cl_abort = clnt_bck_abort, .cl_geterr = clnt_bck_geterr, .cl_freeres = clnt_bck_freeres, .cl_close = clnt_bck_close, .cl_destroy = clnt_bck_destroy, .cl_control = clnt_bck_control }; /* * Create a client handle for a connection. * Default options are set, which the user can change using clnt_control()'s. * This code handles the special case of an NFSv4.1 session backchannel * call, which is sent on a TCP connection created against the server * by a client. */ void * clnt_bck_create( struct socket *so, /* Server transport socket. 
*/ const rpcprog_t prog, /* program number */ const rpcvers_t vers) /* version number */ { CLIENT *cl; /* client handle */ struct ct_data *ct = NULL; /* client handle */ struct timeval now; struct rpc_msg call_msg; static uint32_t disrupt; XDR xdrs; if (disrupt == 0) disrupt = (uint32_t)(long)so; cl = (CLIENT *)mem_alloc(sizeof (*cl)); ct = (struct ct_data *)mem_alloc(sizeof (*ct)); mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF); ct->ct_threads = 0; ct->ct_closing = FALSE; ct->ct_closed = FALSE; ct->ct_upcallrefs = 0; ct->ct_closeit = FALSE; /* * Set up private data struct */ ct->ct_wait.tv_sec = -1; ct->ct_wait.tv_usec = -1; /* * Initialize call message */ getmicrotime(&now); ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now); call_msg.rm_xid = ct->ct_xid; call_msg.rm_direction = CALL; call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION; call_msg.rm_call.cb_prog = (uint32_t)prog; call_msg.rm_call.cb_vers = (uint32_t)vers; /* * pre-serialize the static part of the call msg and stash it away */ xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE, XDR_ENCODE); if (!xdr_callhdr(&xdrs, &call_msg)) goto err; ct->ct_mpos = XDR_GETPOS(&xdrs); XDR_DESTROY(&xdrs); ct->ct_waitchan = "rpcbck"; ct->ct_waitflag = 0; cl->cl_refs = 1; cl->cl_ops = &clnt_bck_ops; cl->cl_private = ct; cl->cl_auth = authnone_create(); TAILQ_INIT(&ct->ct_pending); return (cl); err: mtx_destroy(&ct->ct_lock); mem_free(ct, sizeof (struct ct_data)); mem_free(cl, sizeof (CLIENT)); return (NULL); } enum clnt_stat clnt_bck_call( CLIENT *cl, /* client handle */ struct rpc_callextra *ext, /* call metadata */ rpcproc_t proc, /* procedure number */ struct mbuf *args, /* pointer to args */ struct mbuf **resultsp, /* pointer to results */ struct timeval utimeout, SVCXPRT *xprt) { struct ct_data *ct = (struct ct_data *) cl->cl_private; AUTH *auth; struct rpc_err *errp; enum clnt_stat stat; XDR xdrs; struct rpc_msg reply_msg; bool_t ok; int nrefreshes = 2; /* number of times to refresh cred */ struct timeval timeout; uint32_t xid; struct mbuf *mreq = NULL, *results; struct ct_request *cr; int error, maxextsiz; #ifdef KERN_TLS u_int maxlen; #endif cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK); mtx_lock(&ct->ct_lock); if (ct->ct_closing || ct->ct_closed) { mtx_unlock(&ct->ct_lock); free(cr, M_RPC); return (RPC_CANTSEND); } ct->ct_threads++; if (ext) { auth = ext->rc_auth; errp = &ext->rc_err; } else { auth = cl->cl_auth; errp = &ct->ct_error; } cr->cr_mrep = NULL; cr->cr_error = 0; if (ct->ct_wait.tv_usec == -1) timeout = utimeout; /* use supplied timeout */ else timeout = ct->ct_wait; /* use default timeout */ call_again: mtx_assert(&ct->ct_lock, MA_OWNED); ct->ct_xid++; xid = ct->ct_xid; mtx_unlock(&ct->ct_lock); /* * Leave space to pre-pend the record mark. */ mreq = m_gethdr(M_WAITOK, MT_DATA); mreq->m_data += sizeof(uint32_t); KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN, ("RPC header too big")); bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos); mreq->m_len = ct->ct_mpos; /* * The XID is the first thing in the request. */ *mtod(mreq, uint32_t *) = htonl(xid); xdrmbuf_create(&xdrs, mreq, XDR_ENCODE); errp->re_status = stat = RPC_SUCCESS; if ((!XDR_PUTINT32(&xdrs, &proc)) || (!AUTH_MARSHALL(auth, xid, &xdrs, m_copym(args, 0, M_COPYALL, M_WAITOK)))) { errp->re_status = stat = RPC_CANTENCODEARGS; mtx_lock(&ct->ct_lock); goto out; } mreq->m_pkthdr.len = m_length(mreq, NULL); /* * Prepend a record marker containing the packet length. 
*/ M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK); *mtod(mreq, uint32_t *) = htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t))); cr->cr_xid = xid; mtx_lock(&ct->ct_lock); /* * Check to see if the client end has already started to close down * the connection. The svc code will have set ct_error.re_status * to RPC_CANTRECV if this is the case. * If the client starts to close down the connection after this * point, it will be detected later when cr_error is checked, * since the request is in the ct_pending queue. */ if (ct->ct_error.re_status == RPC_CANTRECV) { if (errp != &ct->ct_error) { errp->re_errno = ct->ct_error.re_errno; errp->re_status = RPC_CANTRECV; } stat = RPC_CANTRECV; goto out; } TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link); mtx_unlock(&ct->ct_lock); /* For RPC-over-TLS, copy mrep to a chain of ext_pgs. */ if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) { /* * Copy the mbuf chain to a chain of * ext_pgs mbuf(s) as required by KERN_TLS. */ maxextsiz = TLS_MAX_MSG_SIZE_V10_2; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) maxextsiz = min(maxextsiz, maxlen); #endif mreq = _rpc_copym_into_ext_pgs(mreq, maxextsiz); } /* * sosend consumes mreq. */ sx_xlock(&xprt->xp_lock); error = sosend(xprt->xp_socket, NULL, NULL, mreq, NULL, 0, curthread); if (error != 0) printf("sosend=%d\n", error); mreq = NULL; if (error == EMSGSIZE) { printf("emsgsize\n"); SOCKBUF_LOCK(&xprt->xp_socket->so_snd); sbwait(&xprt->xp_socket->so_snd); SOCKBUF_UNLOCK(&xprt->xp_socket->so_snd); sx_xunlock(&xprt->xp_lock); AUTH_VALIDATE(auth, xid, NULL, NULL); mtx_lock(&ct->ct_lock); TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); goto call_again; } sx_xunlock(&xprt->xp_lock); reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL; reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf; reply_msg.acpted_rply.ar_verf.oa_length = 0; reply_msg.acpted_rply.ar_results.where = NULL; reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void; mtx_lock(&ct->ct_lock); if (error) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_errno = error; errp->re_status = stat = RPC_CANTSEND; goto out; } /* * Check to see if we got an upcall while waiting for the * lock. In both these cases, the request has been removed * from ct->ct_pending. */ if (cr->cr_error) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_errno = cr->cr_error; errp->re_status = stat = RPC_CANTRECV; goto out; } if (cr->cr_mrep) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); goto got_reply; } /* * Hack to provide rpc-based message passing */ if (timeout.tv_sec == 0 && timeout.tv_usec == 0) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_status = stat = RPC_TIMEDOUT; goto out; } error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan, tvtohz(&timeout)); TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); if (error) { /* * The sleep returned an error so our request is still * on the list. Turn the error code into an * appropriate client status. */ errp->re_errno = error; switch (error) { case EINTR: stat = RPC_INTR; break; case EWOULDBLOCK: stat = RPC_TIMEDOUT; break; default: stat = RPC_CANTRECV; } errp->re_status = stat; goto out; } else { /* * We were woken up by the svc thread. If the * upcall had a receive error, report that, * otherwise we have a reply. */ if (cr->cr_error) { errp->re_errno = cr->cr_error; errp->re_status = stat = RPC_CANTRECV; goto out; } } got_reply: /* * Now decode and validate the response. We need to drop the * lock since xdr_replymsg may end up sleeping in malloc. 
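/*
 * Framing sketch (illustration of the record mark built above): every RPC
 * record sent on a byte stream carries a 4-byte mark whose high bit flags
 * the final fragment and whose remaining 31 bits give the fragment length.
 */
static uint32_t
example_record_mark(uint32_t frag_len, bool last_frag)
{
	uint32_t mark;

	mark = frag_len & 0x7fffffff;
	if (last_frag)
		mark |= 0x80000000;
	return (htonl(mark));		/* on-the-wire, network byte order */
}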
*/ mtx_unlock(&ct->ct_lock); if (ext && ext->rc_feedback) ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg); xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE); ok = xdr_replymsg(&xdrs, &reply_msg); cr->cr_mrep = NULL; if (ok) { if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) && (reply_msg.acpted_rply.ar_stat == SUCCESS)) errp->re_status = stat = RPC_SUCCESS; else stat = _seterr_reply(&reply_msg, errp); if (stat == RPC_SUCCESS) { results = xdrmbuf_getall(&xdrs); if (!AUTH_VALIDATE(auth, xid, &reply_msg.acpted_rply.ar_verf, &results)) { errp->re_status = stat = RPC_AUTHERROR; errp->re_why = AUTH_INVALIDRESP; } else { KASSERT(results, ("auth validated but no result")); *resultsp = results; } } /* end successful completion */ /* * If unsuccessful AND error is an authentication error * then refresh credentials and try again, else break */ else if (stat == RPC_AUTHERROR) /* maybe our credentials need to be refreshed ... */ if (nrefreshes > 0 && AUTH_REFRESH(auth, &reply_msg)) { nrefreshes--; XDR_DESTROY(&xdrs); mtx_lock(&ct->ct_lock); goto call_again; } /* end of unsuccessful completion */ /* end of valid reply message */ } else errp->re_status = stat = RPC_CANTDECODERES; XDR_DESTROY(&xdrs); mtx_lock(&ct->ct_lock); out: mtx_assert(&ct->ct_lock, MA_OWNED); KASSERT(stat != RPC_SUCCESS || *resultsp, ("RPC_SUCCESS without reply")); if (mreq != NULL) m_freem(mreq); if (cr->cr_mrep != NULL) m_freem(cr->cr_mrep); ct->ct_threads--; if (ct->ct_closing) wakeup(ct); mtx_unlock(&ct->ct_lock); if (auth && stat != RPC_SUCCESS) AUTH_VALIDATE(auth, xid, NULL, NULL); free(cr, M_RPC); return (stat); } static void clnt_bck_geterr(CLIENT *cl, struct rpc_err *errp) { struct ct_data *ct = (struct ct_data *) cl->cl_private; *errp = ct->ct_error; } static bool_t clnt_bck_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr) { XDR xdrs; bool_t dummy; xdrs.x_op = XDR_FREE; dummy = (*xdr_res)(&xdrs, res_ptr); return (dummy); } /*ARGSUSED*/ static void clnt_bck_abort(CLIENT *cl) { } static bool_t clnt_bck_control(CLIENT *cl, u_int request, void *info) { return (TRUE); } static void clnt_bck_close(CLIENT *cl) { struct ct_data *ct = (struct ct_data *) cl->cl_private; mtx_lock(&ct->ct_lock); if (ct->ct_closed) { mtx_unlock(&ct->ct_lock); return; } if (ct->ct_closing) { while (ct->ct_closing) msleep(ct, &ct->ct_lock, 0, "rpcclose", 0); KASSERT(ct->ct_closed, ("client should be closed")); mtx_unlock(&ct->ct_lock); return; } ct->ct_closing = FALSE; ct->ct_closed = TRUE; mtx_unlock(&ct->ct_lock); wakeup(ct); } static void clnt_bck_destroy(CLIENT *cl) { struct ct_data *ct = (struct ct_data *) cl->cl_private; clnt_bck_close(cl); mtx_destroy(&ct->ct_lock); mem_free(ct, sizeof(struct ct_data)); if (cl->cl_netid && cl->cl_netid[0]) mem_free(cl->cl_netid, strlen(cl->cl_netid) +1); if (cl->cl_tp && cl->cl_tp[0]) mem_free(cl->cl_tp, strlen(cl->cl_tp) +1); mem_free(cl, sizeof(CLIENT)); } /* * This call is done by the svc code when a backchannel RPC reply is * received. * For the server end, where callback RPCs to the client are performed, * xp_p2 points to the "CLIENT" and not the associated "struct ct_data" * so that svc_vc_destroy() can CLNT_RELEASE() the reference count on it. 
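 * Roughly, the ownership chain assumed here (shown for illustration
 * only) is
 *
 *	SVCXPRT.xp_p2 -> CLIENT -> cl_private -> struct ct_data
 *
 * so clnt_bck_svccall() below is handed the CLIENT and digs the private
 * ct_data out of cl_private itself, while svc_vc_destroy() only ever
 * sees the reference-counted CLIENT.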
*/ void clnt_bck_svccall(void *arg, struct mbuf *mrep, uint32_t xid) { CLIENT *cl = (CLIENT *)arg; struct ct_data *ct; struct ct_request *cr; int foundreq; ct = (struct ct_data *)cl->cl_private; mtx_lock(&ct->ct_lock); if (ct->ct_closing || ct->ct_closed) { mtx_unlock(&ct->ct_lock); m_freem(mrep); return; } ct->ct_upcallrefs++; /* * See if we can match this reply to a request. */ foundreq = 0; TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) { if (cr->cr_xid == xid) { /* * This one matches. We leave the reply mbuf list in * cr->cr_mrep. Set the XID to zero so that we will * ignore any duplicated replies. */ cr->cr_xid = 0; cr->cr_mrep = mrep; cr->cr_error = 0; foundreq = 1; wakeup(cr); break; } } ct->ct_upcallrefs--; if (ct->ct_upcallrefs < 0) panic("rpcvc svccall refcnt"); if (ct->ct_upcallrefs == 0) wakeup(&ct->ct_upcallrefs); mtx_unlock(&ct->ct_lock); if (foundreq == 0) m_freem(mrep); } diff --git a/sys/rpc/clnt_dg.c b/sys/rpc/clnt_dg.c index 3a3662a02a39..63a26cc0b9ac 100644 --- a/sys/rpc/clnt_dg.c +++ b/sys/rpc/clnt_dg.c @@ -1,1155 +1,1155 @@ /* $NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1986-1991 by Sun Microsystems Inc. */ #if defined(LIBC_SCCS) && !defined(lint) #ident "@(#)clnt_dg.c 1.23 94/04/22 SMI" static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro"; #endif #include __FBSDID("$FreeBSD$"); /* * Implements a connectionless client side RPC. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _FREEFALL_CONFIG /* * Disable RPC exponential back-off for FreeBSD.org systems. 
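 * The datagram client below doubles its retransmit interval after each
 * unanswered transmission for as long as the interval is still under
 * RPC_MAX_BACKOFF seconds.  Worked example (illustrative only) with the
 * default 3 second initial wait and the normal 30 second cap:
 *
 *	3s -> 6s -> 12s -> 24s -> 48s, after which it stays put because
 *	the last doubling already overshot the cap.
 *
 * With the 1 second cap used for FreeBSD.org systems the initial
 * interval is already past the cap, so it never doubles at all.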
*/ #define RPC_MAX_BACKOFF 1 /* second */ #else #define RPC_MAX_BACKOFF 30 /* seconds */ #endif static bool_t time_not_ok(struct timeval *); static enum clnt_stat clnt_dg_call(CLIENT *, struct rpc_callextra *, rpcproc_t, struct mbuf *, struct mbuf **, struct timeval); static void clnt_dg_geterr(CLIENT *, struct rpc_err *); static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, void *); static void clnt_dg_abort(CLIENT *); static bool_t clnt_dg_control(CLIENT *, u_int, void *); static void clnt_dg_close(CLIENT *); static void clnt_dg_destroy(CLIENT *); static int clnt_dg_soupcall(struct socket *so, void *arg, int waitflag); -static struct clnt_ops clnt_dg_ops = { +static const struct clnt_ops clnt_dg_ops = { .cl_call = clnt_dg_call, .cl_abort = clnt_dg_abort, .cl_geterr = clnt_dg_geterr, .cl_freeres = clnt_dg_freeres, .cl_close = clnt_dg_close, .cl_destroy = clnt_dg_destroy, .cl_control = clnt_dg_control }; static volatile uint32_t rpc_xid = 0; /* * A pending RPC request which awaits a reply. Requests which have * received their reply will have cr_xid set to zero and cr_mrep to * the mbuf chain of the reply. */ struct cu_request { TAILQ_ENTRY(cu_request) cr_link; CLIENT *cr_client; /* owner */ uint32_t cr_xid; /* XID of request */ struct mbuf *cr_mrep; /* reply received by upcall */ int cr_error; /* any error from upcall */ char cr_verf[MAX_AUTH_BYTES]; /* reply verf */ }; TAILQ_HEAD(cu_request_list, cu_request); #define MCALL_MSG_SIZE 24 /* * This structure is pointed to by the socket buffer's sb_upcallarg * member. It is separate from the client private data to facilitate * multiple clients sharing the same socket. The cs_lock mutex is used * to protect all fields of this structure, the socket's receive * buffer SOCKBUF_LOCK is used to ensure that exactly one of these * structures is installed on the socket. */ struct cu_socket { struct mtx cs_lock; int cs_refs; /* Count of clients */ struct cu_request_list cs_pending; /* Requests awaiting replies */ int cs_upcallrefs; /* Refcnt of upcalls in prog.*/ }; static void clnt_dg_upcallsdone(struct socket *, struct cu_socket *); /* * Private data kept per client handle */ struct cu_data { int cu_threads; /* # threads in clnt_vc_call */ bool_t cu_closing; /* TRUE if we are closing */ bool_t cu_closed; /* TRUE if we are closed */ struct socket *cu_socket; /* connection socket */ bool_t cu_closeit; /* opened by library */ struct sockaddr_storage cu_raddr; /* remote address */ int cu_rlen; struct timeval cu_wait; /* retransmit interval */ struct timeval cu_total; /* total time for the call */ struct rpc_err cu_error; uint32_t cu_xid; char cu_mcallc[MCALL_MSG_SIZE]; /* marshalled callmsg */ size_t cu_mcalllen; size_t cu_sendsz; /* send size */ size_t cu_recvsz; /* recv size */ int cu_async; int cu_connect; /* Use connect(). */ int cu_connected; /* Have done connect(). */ const char *cu_waitchan; int cu_waitflag; int cu_cwnd; /* congestion window */ int cu_sent; /* number of in-flight RPCs */ bool_t cu_cwnd_wait; }; #define CWNDSCALE 256 #define MAXCWND (32 * CWNDSCALE) /* * Connection less client creation returns with client handle parameters. * Default options are set, which the user can change using clnt_control(). * fd should be open and bound. * NB: The rpch->cl_auth is initialized to null authentication. * Caller may wish to set this something more useful. * * sendsz and recvsz are the maximum allowable packet sizes that can be * sent and received. 
Normally they are the same, but they can be * changed to improve the program efficiency and buffer allocation. * If they are 0, use the transport default. * * If svcaddr is NULL, returns NULL. */ CLIENT * clnt_dg_create( struct socket *so, struct sockaddr *svcaddr, /* servers address */ rpcprog_t program, /* program number */ rpcvers_t version, /* version number */ size_t sendsz, /* buffer recv size */ size_t recvsz) /* buffer send size */ { CLIENT *cl = NULL; /* client handle */ struct cu_data *cu = NULL; /* private data */ struct cu_socket *cs = NULL; struct sockbuf *sb; struct timeval now; struct rpc_msg call_msg; struct __rpc_sockinfo si; XDR xdrs; int error; uint32_t newxid; if (svcaddr == NULL) { rpc_createerr.cf_stat = RPC_UNKNOWNADDR; return (NULL); } if (!__rpc_socket2sockinfo(so, &si)) { rpc_createerr.cf_stat = RPC_TLIERROR; rpc_createerr.cf_error.re_errno = 0; return (NULL); } /* * Find the receive and the send size */ sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz); recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz); if ((sendsz == 0) || (recvsz == 0)) { rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */ rpc_createerr.cf_error.re_errno = 0; return (NULL); } cl = mem_alloc(sizeof (CLIENT)); /* * Should be multiple of 4 for XDR. */ sendsz = rounddown(sendsz + 3, 4); recvsz = rounddown(recvsz + 3, 4); cu = mem_alloc(sizeof (*cu)); cu->cu_threads = 0; cu->cu_closing = FALSE; cu->cu_closed = FALSE; (void) memcpy(&cu->cu_raddr, svcaddr, (size_t)svcaddr->sa_len); cu->cu_rlen = svcaddr->sa_len; /* Other values can also be set through clnt_control() */ cu->cu_wait.tv_sec = 3; /* heuristically chosen */ cu->cu_wait.tv_usec = 0; cu->cu_total.tv_sec = -1; cu->cu_total.tv_usec = -1; cu->cu_sendsz = sendsz; cu->cu_recvsz = recvsz; cu->cu_async = FALSE; cu->cu_connect = FALSE; cu->cu_connected = FALSE; cu->cu_waitchan = "rpcrecv"; cu->cu_waitflag = 0; cu->cu_cwnd = MAXCWND / 2; cu->cu_sent = 0; cu->cu_cwnd_wait = FALSE; (void) getmicrotime(&now); /* Clip at 28bits so that it will not wrap around. */ newxid = __RPC_GETXID(&now) & 0xfffffff; atomic_cmpset_32(&rpc_xid, 0, newxid); call_msg.rm_xid = atomic_fetchadd_32(&rpc_xid, 1); call_msg.rm_call.cb_prog = program; call_msg.rm_call.cb_vers = version; xdrmem_create(&xdrs, cu->cu_mcallc, MCALL_MSG_SIZE, XDR_ENCODE); if (! xdr_callhdr(&xdrs, &call_msg)) { rpc_createerr.cf_stat = RPC_CANTENCODEARGS; /* XXX */ rpc_createerr.cf_error.re_errno = 0; goto err2; } cu->cu_mcalllen = XDR_GETPOS(&xdrs); /* * By default, closeit is always FALSE. It is users responsibility * to do a close on it, else the user may use clnt_control * to let clnt_destroy do it for him/her. */ cu->cu_closeit = FALSE; cu->cu_socket = so; error = soreserve(so, (u_long)sendsz, (u_long)recvsz); if (error != 0) { rpc_createerr.cf_stat = RPC_FAILED; rpc_createerr.cf_error.re_errno = error; goto err2; } sb = &so->so_rcv; SOCKBUF_LOCK(&so->so_rcv); recheck_socket: if (sb->sb_upcall) { if (sb->sb_upcall != clnt_dg_soupcall) { SOCKBUF_UNLOCK(&so->so_rcv); printf("clnt_dg_create(): socket already has an incompatible upcall\n"); goto err2; } cs = (struct cu_socket *) sb->sb_upcallarg; mtx_lock(&cs->cs_lock); cs->cs_refs++; mtx_unlock(&cs->cs_lock); } else { /* * We are the first on this socket - allocate the * structure and install it in the socket. */ SOCKBUF_UNLOCK(&so->so_rcv); cs = mem_alloc(sizeof(*cs)); SOCKBUF_LOCK(&so->so_rcv); if (sb->sb_upcall) { /* * We have lost a race with some other client. 
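 * This is the usual drop-lock-and-allocate dance: the socket buffer
 * lock cannot be held across mem_alloc(), so after re-taking it we must
 * re-check sb_upcall.  Sketch of the pattern (illustrative only):
 *
 *	lock(); if (!installed) { unlock(); new = alloc(); lock();
 *		if (installed) { free(new); goto recheck; }
 *		install(new); }
 *
 * The loser of the race frees its cu_socket and instead takes a
 * reference on the one the winner installed.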
*/ mem_free(cs, sizeof(*cs)); goto recheck_socket; } mtx_init(&cs->cs_lock, "cs->cs_lock", NULL, MTX_DEF); cs->cs_refs = 1; cs->cs_upcallrefs = 0; TAILQ_INIT(&cs->cs_pending); soupcall_set(so, SO_RCV, clnt_dg_soupcall, cs); } SOCKBUF_UNLOCK(&so->so_rcv); cl->cl_refs = 1; cl->cl_ops = &clnt_dg_ops; cl->cl_private = (caddr_t)(void *)cu; cl->cl_auth = authnone_create(); cl->cl_tp = NULL; cl->cl_netid = NULL; return (cl); err2: mem_free(cl, sizeof (CLIENT)); mem_free(cu, sizeof (*cu)); return (NULL); } static enum clnt_stat clnt_dg_call( CLIENT *cl, /* client handle */ struct rpc_callextra *ext, /* call metadata */ rpcproc_t proc, /* procedure number */ struct mbuf *args, /* pointer to args */ struct mbuf **resultsp, /* pointer to results */ struct timeval utimeout) /* seconds to wait before giving up */ { struct cu_data *cu = (struct cu_data *)cl->cl_private; struct cu_socket *cs; struct rpc_timers *rt; AUTH *auth; struct rpc_err *errp; enum clnt_stat stat; XDR xdrs; struct rpc_msg reply_msg; bool_t ok; int retrans; /* number of re-transmits so far */ int nrefreshes = 2; /* number of times to refresh cred */ struct timeval *tvp; int timeout; int retransmit_time; int next_sendtime, starttime, rtt, time_waited, tv = 0; struct sockaddr *sa; uint32_t xid = 0; struct mbuf *mreq = NULL, *results; struct cu_request *cr; int error; cs = cu->cu_socket->so_rcv.sb_upcallarg; cr = malloc(sizeof(struct cu_request), M_RPC, M_WAITOK); mtx_lock(&cs->cs_lock); if (cu->cu_closing || cu->cu_closed) { mtx_unlock(&cs->cs_lock); free(cr, M_RPC); return (RPC_CANTSEND); } cu->cu_threads++; if (ext) { auth = ext->rc_auth; errp = &ext->rc_err; } else { auth = cl->cl_auth; errp = &cu->cu_error; } cr->cr_client = cl; cr->cr_mrep = NULL; cr->cr_error = 0; if (cu->cu_total.tv_usec == -1) { tvp = &utimeout; /* use supplied timeout */ } else { tvp = &cu->cu_total; /* use default timeout */ } if (tvp->tv_sec || tvp->tv_usec) timeout = tvtohz(tvp); else timeout = 0; if (cu->cu_connect && !cu->cu_connected) { mtx_unlock(&cs->cs_lock); error = soconnect(cu->cu_socket, (struct sockaddr *)&cu->cu_raddr, curthread); mtx_lock(&cs->cs_lock); if (error) { errp->re_errno = error; errp->re_status = stat = RPC_CANTSEND; goto out; } cu->cu_connected = 1; } if (cu->cu_connected) sa = NULL; else sa = (struct sockaddr *)&cu->cu_raddr; time_waited = 0; retrans = 0; if (ext && ext->rc_timers) { rt = ext->rc_timers; if (!rt->rt_rtxcur) rt->rt_rtxcur = tvtohz(&cu->cu_wait); retransmit_time = next_sendtime = rt->rt_rtxcur; } else { rt = NULL; retransmit_time = next_sendtime = tvtohz(&cu->cu_wait); } starttime = ticks; call_again: mtx_assert(&cs->cs_lock, MA_OWNED); xid = atomic_fetchadd_32(&rpc_xid, 1); send_again: mtx_unlock(&cs->cs_lock); mreq = m_gethdr(M_WAITOK, MT_DATA); KASSERT(cu->cu_mcalllen <= MHLEN, ("RPC header too big")); bcopy(cu->cu_mcallc, mreq->m_data, cu->cu_mcalllen); mreq->m_len = cu->cu_mcalllen; /* * The XID is the first thing in the request. */ *mtod(mreq, uint32_t *) = htonl(xid); xdrmbuf_create(&xdrs, mreq, XDR_ENCODE); if (cu->cu_async == TRUE && args == NULL) goto get_reply; if ((! XDR_PUTINT32(&xdrs, &proc)) || (! AUTH_MARSHALL(auth, xid, &xdrs, m_copym(args, 0, M_COPYALL, M_WAITOK)))) { errp->re_status = stat = RPC_CANTENCODEARGS; mtx_lock(&cs->cs_lock); goto out; } mreq->m_pkthdr.len = m_length(mreq, NULL); cr->cr_xid = xid; mtx_lock(&cs->cs_lock); /* * Try to get a place in the congestion window. 
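 * The window is kept in fixed point: one in-flight RPC costs CWNDSCALE
 * (256) units of cu_sent and cu_cwnd is clamped to [CWNDSCALE, MAXCWND],
 * i.e. 1 to 32 outstanding requests.  Each good reply grows the window
 * by roughly CWNDSCALE*CWNDSCALE/cu_cwnd units and each retransmit
 * halves it - the familiar additive-increase/multiplicative-decrease
 * shape.  Worked example (illustrative only): from the initial
 * MAXCWND/2 = 4096 units (16 requests), a reply adds
 * (65536 + 2048) / 4096 = 16 units, so one window's worth of replies
 * grows the window by a single request.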
*/ while (cu->cu_sent >= cu->cu_cwnd) { cu->cu_cwnd_wait = TRUE; error = msleep(&cu->cu_cwnd_wait, &cs->cs_lock, cu->cu_waitflag, "rpccwnd", 0); if (error) { errp->re_errno = error; if (error == EINTR || error == ERESTART) errp->re_status = stat = RPC_INTR; else errp->re_status = stat = RPC_CANTSEND; goto out; } } cu->cu_sent += CWNDSCALE; TAILQ_INSERT_TAIL(&cs->cs_pending, cr, cr_link); mtx_unlock(&cs->cs_lock); /* * sosend consumes mreq. */ error = sosend(cu->cu_socket, sa, NULL, mreq, NULL, 0, curthread); mreq = NULL; /* * sub-optimal code appears here because we have * some clock time to spare while the packets are in flight. * (We assume that this is actually only executed once.) */ reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL; reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf; reply_msg.acpted_rply.ar_verf.oa_length = 0; reply_msg.acpted_rply.ar_results.where = NULL; reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void; mtx_lock(&cs->cs_lock); if (error) { TAILQ_REMOVE(&cs->cs_pending, cr, cr_link); errp->re_errno = error; errp->re_status = stat = RPC_CANTSEND; cu->cu_sent -= CWNDSCALE; if (cu->cu_cwnd_wait) { cu->cu_cwnd_wait = FALSE; wakeup(&cu->cu_cwnd_wait); } goto out; } /* * Check to see if we got an upcall while waiting for the * lock. */ if (cr->cr_error) { TAILQ_REMOVE(&cs->cs_pending, cr, cr_link); errp->re_errno = cr->cr_error; errp->re_status = stat = RPC_CANTRECV; cu->cu_sent -= CWNDSCALE; if (cu->cu_cwnd_wait) { cu->cu_cwnd_wait = FALSE; wakeup(&cu->cu_cwnd_wait); } goto out; } if (cr->cr_mrep) { TAILQ_REMOVE(&cs->cs_pending, cr, cr_link); cu->cu_sent -= CWNDSCALE; if (cu->cu_cwnd_wait) { cu->cu_cwnd_wait = FALSE; wakeup(&cu->cu_cwnd_wait); } goto got_reply; } /* * Hack to provide rpc-based message passing */ if (timeout == 0) { TAILQ_REMOVE(&cs->cs_pending, cr, cr_link); errp->re_status = stat = RPC_TIMEDOUT; cu->cu_sent -= CWNDSCALE; if (cu->cu_cwnd_wait) { cu->cu_cwnd_wait = FALSE; wakeup(&cu->cu_cwnd_wait); } goto out; } get_reply: for (;;) { /* Decide how long to wait. */ if (next_sendtime < timeout) tv = next_sendtime; else tv = timeout; tv -= time_waited; if (tv > 0) { if (cu->cu_closing || cu->cu_closed) { error = 0; cr->cr_error = ESHUTDOWN; } else { error = msleep(cr, &cs->cs_lock, cu->cu_waitflag, cu->cu_waitchan, tv); } } else { error = EWOULDBLOCK; } TAILQ_REMOVE(&cs->cs_pending, cr, cr_link); cu->cu_sent -= CWNDSCALE; if (cu->cu_cwnd_wait) { cu->cu_cwnd_wait = FALSE; wakeup(&cu->cu_cwnd_wait); } if (!error) { /* * We were woken up by the upcall. If the * upcall had a receive error, report that, * otherwise we have a reply. */ if (cr->cr_error) { errp->re_errno = cr->cr_error; errp->re_status = stat = RPC_CANTRECV; goto out; } cu->cu_cwnd += (CWNDSCALE * CWNDSCALE + cu->cu_cwnd / 2) / cu->cu_cwnd; if (cu->cu_cwnd > MAXCWND) cu->cu_cwnd = MAXCWND; if (rt) { /* * Add one to the time since a tick * count of N means that the actual * time taken was somewhere between N * and N+1. */ rtt = ticks - starttime + 1; /* * Update our estimate of the round * trip time using roughly the * algorithm described in RFC * 2988. Given an RTT sample R: * * RTTVAR = (1-beta) * RTTVAR + beta * |SRTT-R| * SRTT = (1-alpha) * SRTT + alpha * R * * where alpha = 0.125 and beta = 0.25. * * The initial retransmit timeout is * SRTT + 4*RTTVAR and doubles on each * retransmision. 
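 * Worked example in ticks (illustrative only): a first sample of 8
 * gives SRTT = 8, RTTVAR = 8/2 = 4 and a timeout of 8 + 4*4 = 24; a
 * second sample of 16 gives SRTT = 8 + (16-8)/8 = 9,
 * RTTVAR = 4 + (8-4)/4 = 5 and a timeout of 9 + 4*5 = 29.  The integer
 * divisions by 8 and 4 in the code below are the alpha = 0.125 and
 * beta = 0.25 of the comment above.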
*/ if (rt->rt_srtt == 0) { rt->rt_srtt = rtt; rt->rt_deviate = rtt / 2; } else { int32_t error = rtt - rt->rt_srtt; rt->rt_srtt += error / 8; error = abs(error) - rt->rt_deviate; rt->rt_deviate += error / 4; } rt->rt_rtxcur = rt->rt_srtt + 4*rt->rt_deviate; } break; } /* * The sleep returned an error so our request is still * on the list. If we got EWOULDBLOCK, we may want to * re-send the request. */ if (error != EWOULDBLOCK) { errp->re_errno = error; if (error == EINTR || error == ERESTART) errp->re_status = stat = RPC_INTR; else errp->re_status = stat = RPC_CANTRECV; goto out; } time_waited = ticks - starttime; /* Check for timeout. */ if (time_waited > timeout) { errp->re_errno = EWOULDBLOCK; errp->re_status = stat = RPC_TIMEDOUT; goto out; } /* Retransmit if necessary. */ if (time_waited >= next_sendtime) { cu->cu_cwnd /= 2; if (cu->cu_cwnd < CWNDSCALE) cu->cu_cwnd = CWNDSCALE; if (ext && ext->rc_feedback) { mtx_unlock(&cs->cs_lock); if (retrans == 0) ext->rc_feedback(FEEDBACK_REXMIT1, proc, ext->rc_feedback_arg); else ext->rc_feedback(FEEDBACK_REXMIT2, proc, ext->rc_feedback_arg); mtx_lock(&cs->cs_lock); } if (cu->cu_closing || cu->cu_closed) { errp->re_errno = ESHUTDOWN; errp->re_status = stat = RPC_CANTRECV; goto out; } retrans++; /* update retransmit_time */ if (retransmit_time < RPC_MAX_BACKOFF * hz) retransmit_time = 2 * retransmit_time; next_sendtime += retransmit_time; goto send_again; } cu->cu_sent += CWNDSCALE; TAILQ_INSERT_TAIL(&cs->cs_pending, cr, cr_link); } got_reply: /* * Now decode and validate the response. We need to drop the * lock since xdr_replymsg may end up sleeping in malloc. */ mtx_unlock(&cs->cs_lock); if (ext && ext->rc_feedback) ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg); xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE); ok = xdr_replymsg(&xdrs, &reply_msg); cr->cr_mrep = NULL; if (ok) { if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) && (reply_msg.acpted_rply.ar_stat == SUCCESS)) errp->re_status = stat = RPC_SUCCESS; else stat = _seterr_reply(&reply_msg, &(cu->cu_error)); if (errp->re_status == RPC_SUCCESS) { results = xdrmbuf_getall(&xdrs); if (! AUTH_VALIDATE(auth, xid, &reply_msg.acpted_rply.ar_verf, &results)) { errp->re_status = stat = RPC_AUTHERROR; errp->re_why = AUTH_INVALIDRESP; if (retrans && auth->ah_cred.oa_flavor == RPCSEC_GSS) { /* * If we retransmitted, its * possible that we will * receive a reply for one of * the earlier transmissions * (which will use an older * RPCSEC_GSS sequence * number). In this case, just * go back and listen for a * new reply. We could keep a * record of all the seq * numbers we have transmitted * so far so that we could * accept a reply for any of * them here. */ XDR_DESTROY(&xdrs); mtx_lock(&cs->cs_lock); cu->cu_sent += CWNDSCALE; TAILQ_INSERT_TAIL(&cs->cs_pending, cr, cr_link); cr->cr_mrep = NULL; goto get_reply; } } else { *resultsp = results; } } /* end successful completion */ /* * If unsuccessful AND error is an authentication error * then refresh credentials and try again, else break */ else if (stat == RPC_AUTHERROR) /* maybe our credentials need to be refreshed ... 
*/ if (nrefreshes > 0 && AUTH_REFRESH(auth, &reply_msg)) { nrefreshes--; XDR_DESTROY(&xdrs); mtx_lock(&cs->cs_lock); goto call_again; } /* end of unsuccessful completion */ } /* end of valid reply message */ else { errp->re_status = stat = RPC_CANTDECODERES; } XDR_DESTROY(&xdrs); mtx_lock(&cs->cs_lock); out: mtx_assert(&cs->cs_lock, MA_OWNED); if (mreq) m_freem(mreq); if (cr->cr_mrep) m_freem(cr->cr_mrep); cu->cu_threads--; if (cu->cu_closing) wakeup(cu); mtx_unlock(&cs->cs_lock); if (auth && stat != RPC_SUCCESS) AUTH_VALIDATE(auth, xid, NULL, NULL); free(cr, M_RPC); return (stat); } static void clnt_dg_geterr(CLIENT *cl, struct rpc_err *errp) { struct cu_data *cu = (struct cu_data *)cl->cl_private; *errp = cu->cu_error; } static bool_t clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr) { XDR xdrs; bool_t dummy; xdrs.x_op = XDR_FREE; dummy = (*xdr_res)(&xdrs, res_ptr); return (dummy); } /*ARGSUSED*/ static void clnt_dg_abort(CLIENT *h) { } static bool_t clnt_dg_control(CLIENT *cl, u_int request, void *info) { struct cu_data *cu = (struct cu_data *)cl->cl_private; struct cu_socket *cs; struct sockaddr *addr; cs = cu->cu_socket->so_rcv.sb_upcallarg; mtx_lock(&cs->cs_lock); switch (request) { case CLSET_FD_CLOSE: cu->cu_closeit = TRUE; mtx_unlock(&cs->cs_lock); return (TRUE); case CLSET_FD_NCLOSE: cu->cu_closeit = FALSE; mtx_unlock(&cs->cs_lock); return (TRUE); } /* for other requests which use info */ if (info == NULL) { mtx_unlock(&cs->cs_lock); return (FALSE); } switch (request) { case CLSET_TIMEOUT: if (time_not_ok((struct timeval *)info)) { mtx_unlock(&cs->cs_lock); return (FALSE); } cu->cu_total = *(struct timeval *)info; break; case CLGET_TIMEOUT: *(struct timeval *)info = cu->cu_total; break; case CLSET_RETRY_TIMEOUT: if (time_not_ok((struct timeval *)info)) { mtx_unlock(&cs->cs_lock); return (FALSE); } cu->cu_wait = *(struct timeval *)info; break; case CLGET_RETRY_TIMEOUT: *(struct timeval *)info = cu->cu_wait; break; case CLGET_SVC_ADDR: /* * Slightly different semantics to userland - we use * sockaddr instead of netbuf. */ memcpy(info, &cu->cu_raddr, cu->cu_raddr.ss_len); break; case CLSET_SVC_ADDR: /* set to new address */ addr = (struct sockaddr *)info; (void) memcpy(&cu->cu_raddr, addr, addr->sa_len); break; case CLGET_XID: *(uint32_t *)info = atomic_load_32(&rpc_xid); break; case CLSET_XID: /* This will set the xid of the NEXT call */ /* decrement by 1 as clnt_dg_call() increments once */ atomic_store_32(&rpc_xid, *(uint32_t *)info - 1); break; case CLGET_VERS: /* * This RELIES on the information that, in the call body, * the version number field is the fifth field from the * beginning of the RPC header. MUST be changed if the * call_struct is changed */ *(uint32_t *)info = ntohl(*(uint32_t *)(void *)(cu->cu_mcallc + 4 * BYTES_PER_XDR_UNIT)); break; case CLSET_VERS: *(uint32_t *)(void *)(cu->cu_mcallc + 4 * BYTES_PER_XDR_UNIT) = htonl(*(uint32_t *)info); break; case CLGET_PROG: /* * This RELIES on the information that, in the call body, * the program number field is the fourth field from the * beginning of the RPC header. 
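 * For reference, the header pre-serialized by xdr_callhdr() is a run of
 * 32-bit XDR units laid out as
 *
 *	[0] xid  [1] direction  [2] rpcvers  [3] prog  [4] vers
 *
 * which is why CLGET/CLSET_PROG poke offset 3 * BYTES_PER_XDR_UNIT and
 * CLGET/CLSET_VERS offset 4 * BYTES_PER_XDR_UNIT.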
MUST be changed if the * call_struct is changed */ *(uint32_t *)info = ntohl(*(uint32_t *)(void *)(cu->cu_mcallc + 3 * BYTES_PER_XDR_UNIT)); break; case CLSET_PROG: *(uint32_t *)(void *)(cu->cu_mcallc + 3 * BYTES_PER_XDR_UNIT) = htonl(*(uint32_t *)info); break; case CLSET_ASYNC: cu->cu_async = *(int *)info; break; case CLSET_CONNECT: cu->cu_connect = *(int *)info; break; case CLSET_WAITCHAN: cu->cu_waitchan = (const char *)info; break; case CLGET_WAITCHAN: *(const char **) info = cu->cu_waitchan; break; case CLSET_INTERRUPTIBLE: if (*(int *) info) cu->cu_waitflag = PCATCH; else cu->cu_waitflag = 0; break; case CLGET_INTERRUPTIBLE: if (cu->cu_waitflag) *(int *) info = TRUE; else *(int *) info = FALSE; break; default: mtx_unlock(&cs->cs_lock); return (FALSE); } mtx_unlock(&cs->cs_lock); return (TRUE); } static void clnt_dg_close(CLIENT *cl) { struct cu_data *cu = (struct cu_data *)cl->cl_private; struct cu_socket *cs; struct cu_request *cr; cs = cu->cu_socket->so_rcv.sb_upcallarg; mtx_lock(&cs->cs_lock); if (cu->cu_closed) { mtx_unlock(&cs->cs_lock); return; } if (cu->cu_closing) { while (cu->cu_closing) msleep(cu, &cs->cs_lock, 0, "rpcclose", 0); KASSERT(cu->cu_closed, ("client should be closed")); mtx_unlock(&cs->cs_lock); return; } /* * Abort any pending requests and wait until everyone * has finished with clnt_vc_call. */ cu->cu_closing = TRUE; TAILQ_FOREACH(cr, &cs->cs_pending, cr_link) { if (cr->cr_client == cl) { cr->cr_xid = 0; cr->cr_error = ESHUTDOWN; wakeup(cr); } } while (cu->cu_threads) msleep(cu, &cs->cs_lock, 0, "rpcclose", 0); cu->cu_closing = FALSE; cu->cu_closed = TRUE; mtx_unlock(&cs->cs_lock); wakeup(cu); } static void clnt_dg_destroy(CLIENT *cl) { struct cu_data *cu = (struct cu_data *)cl->cl_private; struct cu_socket *cs; struct socket *so = NULL; bool_t lastsocketref; cs = cu->cu_socket->so_rcv.sb_upcallarg; clnt_dg_close(cl); SOCKBUF_LOCK(&cu->cu_socket->so_rcv); mtx_lock(&cs->cs_lock); cs->cs_refs--; if (cs->cs_refs == 0) { mtx_unlock(&cs->cs_lock); soupcall_clear(cu->cu_socket, SO_RCV); clnt_dg_upcallsdone(cu->cu_socket, cs); SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv); mtx_destroy(&cs->cs_lock); mem_free(cs, sizeof(*cs)); lastsocketref = TRUE; } else { mtx_unlock(&cs->cs_lock); SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv); lastsocketref = FALSE; } if (cu->cu_closeit && lastsocketref) { so = cu->cu_socket; cu->cu_socket = NULL; } if (so) soclose(so); if (cl->cl_netid && cl->cl_netid[0]) mem_free(cl->cl_netid, strlen(cl->cl_netid) +1); if (cl->cl_tp && cl->cl_tp[0]) mem_free(cl->cl_tp, strlen(cl->cl_tp) +1); mem_free(cu, sizeof (*cu)); mem_free(cl, sizeof (CLIENT)); } /* * Make sure that the time is not garbage. -1 value is allowed. */ static bool_t time_not_ok(struct timeval *t) { return (t->tv_sec < -1 || t->tv_sec > 100000000 || t->tv_usec < -1 || t->tv_usec > 1000000); } int clnt_dg_soupcall(struct socket *so, void *arg, int waitflag) { struct cu_socket *cs = (struct cu_socket *) arg; struct uio uio; struct mbuf *m; struct mbuf *control; struct cu_request *cr; int error, rcvflag, foundreq; uint32_t xid; cs->cs_upcallrefs++; uio.uio_resid = 1000000000; uio.uio_td = curthread; do { SOCKBUF_UNLOCK(&so->so_rcv); m = NULL; control = NULL; rcvflag = MSG_DONTWAIT; error = soreceive(so, NULL, &uio, &m, &control, &rcvflag); if (control) m_freem(control); SOCKBUF_LOCK(&so->so_rcv); if (error == EWOULDBLOCK) break; /* * If there was an error, wake up all pending * requests. 
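 * Setting cr_error and clearing cr_xid before the wakeup() makes every
 * clnt_dg_call() blocked in msleep() see the failure and return
 * RPC_CANTRECV rather than sitting out its full timeout.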
*/ if (error) { mtx_lock(&cs->cs_lock); TAILQ_FOREACH(cr, &cs->cs_pending, cr_link) { cr->cr_xid = 0; cr->cr_error = error; wakeup(cr); } mtx_unlock(&cs->cs_lock); break; } /* * The XID is in the first uint32_t of the reply. */ if (m->m_len < sizeof(xid) && m_length(m, NULL) < sizeof(xid)) { /* * Should never happen. */ m_freem(m); continue; } m_copydata(m, 0, sizeof(xid), (char *)&xid); xid = ntohl(xid); /* * Attempt to match this reply with a pending request. */ mtx_lock(&cs->cs_lock); foundreq = 0; TAILQ_FOREACH(cr, &cs->cs_pending, cr_link) { if (cr->cr_xid == xid) { /* * This one matches. We leave the * reply mbuf in cr->cr_mrep. Set the * XID to zero so that we will ignore * any duplicated replies that arrive * before clnt_dg_call removes it from * the queue. */ cr->cr_xid = 0; cr->cr_mrep = m; cr->cr_error = 0; foundreq = 1; wakeup(cr); break; } } mtx_unlock(&cs->cs_lock); /* * If we didn't find the matching request, just drop * it - its probably a repeated reply. */ if (!foundreq) m_freem(m); } while (m); cs->cs_upcallrefs--; if (cs->cs_upcallrefs < 0) panic("rpcdg upcall refcnt"); if (cs->cs_upcallrefs == 0) wakeup(&cs->cs_upcallrefs); return (SU_OK); } /* * Wait for all upcalls in progress to complete. */ static void clnt_dg_upcallsdone(struct socket *so, struct cu_socket *cs) { SOCKBUF_LOCK_ASSERT(&so->so_rcv); while (cs->cs_upcallrefs > 0) (void) msleep(&cs->cs_upcallrefs, SOCKBUF_MTX(&so->so_rcv), 0, "rpcdgup", 0); } diff --git a/sys/rpc/clnt_rc.c b/sys/rpc/clnt_rc.c index ae3b2985a891..aa69356b8cd2 100644 --- a/sys/rpc/clnt_rc.c +++ b/sys/rpc/clnt_rc.c @@ -1,581 +1,581 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static enum clnt_stat clnt_reconnect_call(CLIENT *, struct rpc_callextra *, rpcproc_t, struct mbuf *, struct mbuf **, struct timeval); static void clnt_reconnect_geterr(CLIENT *, struct rpc_err *); static bool_t clnt_reconnect_freeres(CLIENT *, xdrproc_t, void *); static void clnt_reconnect_abort(CLIENT *); static bool_t clnt_reconnect_control(CLIENT *, u_int, void *); static void clnt_reconnect_close(CLIENT *); static void clnt_reconnect_destroy(CLIENT *); -static struct clnt_ops clnt_reconnect_ops = { +static const struct clnt_ops clnt_reconnect_ops = { .cl_call = clnt_reconnect_call, .cl_abort = clnt_reconnect_abort, .cl_geterr = clnt_reconnect_geterr, .cl_freeres = clnt_reconnect_freeres, .cl_close = clnt_reconnect_close, .cl_destroy = clnt_reconnect_destroy, .cl_control = clnt_reconnect_control }; static int fake_wchan; CLIENT * clnt_reconnect_create( struct netconfig *nconf, /* network type */ struct sockaddr *svcaddr, /* servers address */ rpcprog_t program, /* program number */ rpcvers_t version, /* version number */ size_t sendsz, /* buffer recv size */ size_t recvsz) /* buffer send size */ { CLIENT *cl = NULL; /* client handle */ struct rc_data *rc = NULL; /* private data */ if (svcaddr == NULL) { rpc_createerr.cf_stat = RPC_UNKNOWNADDR; return (NULL); } cl = mem_alloc(sizeof (CLIENT)); rc = mem_alloc(sizeof (*rc)); mtx_init(&rc->rc_lock, "rc->rc_lock", NULL, MTX_DEF); (void) memcpy(&rc->rc_addr, svcaddr, (size_t)svcaddr->sa_len); rc->rc_nconf = nconf; rc->rc_prog = program; rc->rc_vers = version; rc->rc_sendsz = sendsz; rc->rc_recvsz = recvsz; rc->rc_timeout.tv_sec = -1; rc->rc_timeout.tv_usec = -1; rc->rc_retry.tv_sec = 3; rc->rc_retry.tv_usec = 0; rc->rc_retries = INT_MAX; rc->rc_privport = FALSE; rc->rc_waitchan = "rpcrecv"; rc->rc_intr = 0; rc->rc_connecting = FALSE; rc->rc_closed = FALSE; rc->rc_ucred = crdup(curthread->td_ucred); rc->rc_client = NULL; rc->rc_tls = false; rc->rc_tlscertname = NULL; rc->rc_reconcall = NULL; rc->rc_reconarg = NULL; cl->cl_refs = 1; cl->cl_ops = &clnt_reconnect_ops; cl->cl_private = (caddr_t)(void *)rc; cl->cl_auth = authnone_create(); cl->cl_tp = NULL; cl->cl_netid = NULL; return (cl); } static enum clnt_stat clnt_reconnect_connect(CLIENT *cl) { struct thread *td = curthread; struct rc_data *rc = (struct rc_data *)cl->cl_private; struct socket *so; enum clnt_stat stat; int error; int one = 1; struct ucred *oldcred; CLIENT *newclient = NULL; uint64_t ssl[3]; uint32_t reterr; mtx_lock(&rc->rc_lock); while (rc->rc_connecting) { error = msleep(rc, &rc->rc_lock, rc->rc_intr ? PCATCH : 0, "rpcrecon", 0); if (error) { mtx_unlock(&rc->rc_lock); return (RPC_INTR); } } if (rc->rc_closed) { mtx_unlock(&rc->rc_lock); return (RPC_CANTSEND); } if (rc->rc_client) { mtx_unlock(&rc->rc_lock); return (RPC_SUCCESS); } /* * My turn to attempt a connect. The rc_connecting variable * serializes the following code sequence, so it is guaranteed * that rc_client will still be NULL after it is re-locked below, * since that is the only place it is set non-NULL. 
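 * In effect rc_connecting is a small hand-rolled latch: the first
 * thread to find rc_client NULL sets it and performs the connect, while
 * later callers sleep in the msleep() loop above until wakeup(rc) and
 * then observe either the freshly installed rc_client or a closed
 * handle.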
*/ rc->rc_connecting = TRUE; mtx_unlock(&rc->rc_lock); oldcred = td->td_ucred; td->td_ucred = rc->rc_ucred; so = __rpc_nconf2socket(rc->rc_nconf); if (!so) { stat = rpc_createerr.cf_stat = RPC_TLIERROR; rpc_createerr.cf_error.re_errno = 0; td->td_ucred = oldcred; goto out; } if (rc->rc_privport) bindresvport(so, NULL); if (rc->rc_nconf->nc_semantics == NC_TPI_CLTS) newclient = clnt_dg_create(so, (struct sockaddr *) &rc->rc_addr, rc->rc_prog, rc->rc_vers, rc->rc_sendsz, rc->rc_recvsz); else { /* * I do not believe a timeout of less than 1sec would make * sense here since short delays can occur when a server is * temporarily overloaded. */ if (rc->rc_timeout.tv_sec > 0 && rc->rc_timeout.tv_usec >= 0) { error = so_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &rc->rc_timeout, sizeof(struct timeval)); if (error != 0) { stat = rpc_createerr.cf_stat = RPC_CANTSEND; rpc_createerr.cf_error.re_errno = error; td->td_ucred = oldcred; goto out; } } newclient = clnt_vc_create(so, (struct sockaddr *) &rc->rc_addr, rc->rc_prog, rc->rc_vers, rc->rc_sendsz, rc->rc_recvsz, rc->rc_intr); if (rc->rc_tls && newclient != NULL) { stat = rpctls_connect(newclient, rc->rc_tlscertname, so, ssl, &reterr); if (stat != RPC_SUCCESS || reterr != RPCTLSERR_OK) { if (stat == RPC_SUCCESS) stat = RPC_FAILED; stat = rpc_createerr.cf_stat = stat; rpc_createerr.cf_error.re_errno = 0; CLNT_CLOSE(newclient); CLNT_RELEASE(newclient); newclient = NULL; td->td_ucred = oldcred; goto out; } } if (newclient != NULL && rc->rc_reconcall != NULL) (*rc->rc_reconcall)(newclient, rc->rc_reconarg, rc->rc_ucred); } td->td_ucred = oldcred; if (!newclient) { soclose(so); rc->rc_err = rpc_createerr.cf_error; stat = rpc_createerr.cf_stat; goto out; } CLNT_CONTROL(newclient, CLSET_FD_CLOSE, 0); CLNT_CONTROL(newclient, CLSET_CONNECT, &one); CLNT_CONTROL(newclient, CLSET_TIMEOUT, &rc->rc_timeout); CLNT_CONTROL(newclient, CLSET_RETRY_TIMEOUT, &rc->rc_retry); CLNT_CONTROL(newclient, CLSET_WAITCHAN, rc->rc_waitchan); CLNT_CONTROL(newclient, CLSET_INTERRUPTIBLE, &rc->rc_intr); if (rc->rc_tls) CLNT_CONTROL(newclient, CLSET_TLS, ssl); if (rc->rc_backchannel != NULL) CLNT_CONTROL(newclient, CLSET_BACKCHANNEL, rc->rc_backchannel); stat = RPC_SUCCESS; out: mtx_lock(&rc->rc_lock); KASSERT(rc->rc_client == NULL, ("rc_client not null")); if (!rc->rc_closed) { rc->rc_client = newclient; newclient = NULL; } rc->rc_connecting = FALSE; wakeup(rc); mtx_unlock(&rc->rc_lock); if (newclient) { /* * It has been closed, so discard the new client. * nb: clnt_[dg|vc]_close()/clnt_[dg|vc]_destroy() cannot * be called with the rc_lock mutex held, since they may * msleep() while holding a different mutex. */ CLNT_CLOSE(newclient); CLNT_RELEASE(newclient); } return (stat); } static enum clnt_stat clnt_reconnect_call( CLIENT *cl, /* client handle */ struct rpc_callextra *ext, /* call metadata */ rpcproc_t proc, /* procedure number */ struct mbuf *args, /* pointer to args */ struct mbuf **resultsp, /* pointer to results */ struct timeval utimeout) { struct rc_data *rc = (struct rc_data *)cl->cl_private; CLIENT *client; enum clnt_stat stat; int tries, error; tries = 0; do { mtx_lock(&rc->rc_lock); if (rc->rc_closed) { mtx_unlock(&rc->rc_lock); return (RPC_CANTSEND); } if (!rc->rc_client) { mtx_unlock(&rc->rc_lock); stat = clnt_reconnect_connect(cl); if (stat == RPC_SYSTEMERROR) { error = tsleep(&fake_wchan, rc->rc_intr ? 
PCATCH : 0, "rpccon", hz); if (error == EINTR || error == ERESTART) return (RPC_INTR); tries++; if (tries >= rc->rc_retries) return (stat); continue; } if (stat != RPC_SUCCESS) return (stat); mtx_lock(&rc->rc_lock); } if (!rc->rc_client) { mtx_unlock(&rc->rc_lock); stat = RPC_FAILED; continue; } CLNT_ACQUIRE(rc->rc_client); client = rc->rc_client; mtx_unlock(&rc->rc_lock); stat = CLNT_CALL_MBUF(client, ext, proc, args, resultsp, utimeout); if (stat != RPC_SUCCESS) { if (!ext) CLNT_GETERR(client, &rc->rc_err); } if (stat == RPC_TIMEDOUT) { /* * Check for async send misfeature for NLM * protocol. */ if ((rc->rc_timeout.tv_sec == 0 && rc->rc_timeout.tv_usec == 0) || (rc->rc_timeout.tv_sec == -1 && utimeout.tv_sec == 0 && utimeout.tv_usec == 0)) { CLNT_RELEASE(client); break; } } if (stat == RPC_TIMEDOUT || stat == RPC_CANTSEND || stat == RPC_CANTRECV) { tries++; if (tries >= rc->rc_retries) { CLNT_RELEASE(client); break; } if (ext && ext->rc_feedback) ext->rc_feedback(FEEDBACK_RECONNECT, proc, ext->rc_feedback_arg); mtx_lock(&rc->rc_lock); /* * Make sure that someone else hasn't already * reconnected by checking if rc_client has changed. * If not, we are done with the client and must * do CLNT_RELEASE(client) twice to dispose of it, * because there is both an initial refcnt and one * acquired by CLNT_ACQUIRE() above. */ if (rc->rc_client == client) { rc->rc_client = NULL; mtx_unlock(&rc->rc_lock); CLNT_RELEASE(client); } else { mtx_unlock(&rc->rc_lock); } CLNT_RELEASE(client); } else { CLNT_RELEASE(client); break; } } while (stat != RPC_SUCCESS); KASSERT(stat != RPC_SUCCESS || *resultsp, ("RPC_SUCCESS without reply")); return (stat); } static void clnt_reconnect_geterr(CLIENT *cl, struct rpc_err *errp) { struct rc_data *rc = (struct rc_data *)cl->cl_private; *errp = rc->rc_err; } /* * Since this function requires that rc_client be valid, it can * only be called when that is guaranteed to be the case. */ static bool_t clnt_reconnect_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr) { struct rc_data *rc = (struct rc_data *)cl->cl_private; return (CLNT_FREERES(rc->rc_client, xdr_res, res_ptr)); } /*ARGSUSED*/ static void clnt_reconnect_abort(CLIENT *h) { } /* * CLNT_CONTROL() on the client returned by clnt_reconnect_create() must * always be called before CLNT_CALL_MBUF() by a single thread only. 
*/ static bool_t clnt_reconnect_control(CLIENT *cl, u_int request, void *info) { struct rc_data *rc = (struct rc_data *)cl->cl_private; SVCXPRT *xprt; size_t slen; struct rpc_reconupcall *upcp; if (info == NULL) { return (FALSE); } switch (request) { case CLSET_TIMEOUT: rc->rc_timeout = *(struct timeval *)info; if (rc->rc_client) CLNT_CONTROL(rc->rc_client, request, info); break; case CLGET_TIMEOUT: *(struct timeval *)info = rc->rc_timeout; break; case CLSET_RETRY_TIMEOUT: rc->rc_retry = *(struct timeval *)info; if (rc->rc_client) CLNT_CONTROL(rc->rc_client, request, info); break; case CLGET_RETRY_TIMEOUT: *(struct timeval *)info = rc->rc_retry; break; case CLGET_VERS: *(uint32_t *)info = rc->rc_vers; break; case CLSET_VERS: rc->rc_vers = *(uint32_t *) info; if (rc->rc_client) CLNT_CONTROL(rc->rc_client, CLSET_VERS, info); break; case CLGET_PROG: *(uint32_t *)info = rc->rc_prog; break; case CLSET_PROG: rc->rc_prog = *(uint32_t *) info; if (rc->rc_client) CLNT_CONTROL(rc->rc_client, request, info); break; case CLSET_WAITCHAN: rc->rc_waitchan = (char *)info; if (rc->rc_client) CLNT_CONTROL(rc->rc_client, request, info); break; case CLGET_WAITCHAN: *(const char **) info = rc->rc_waitchan; break; case CLSET_INTERRUPTIBLE: rc->rc_intr = *(int *) info; if (rc->rc_client) CLNT_CONTROL(rc->rc_client, request, info); break; case CLGET_INTERRUPTIBLE: *(int *) info = rc->rc_intr; break; case CLSET_RETRIES: rc->rc_retries = *(int *) info; break; case CLGET_RETRIES: *(int *) info = rc->rc_retries; break; case CLSET_PRIVPORT: rc->rc_privport = *(int *) info; break; case CLGET_PRIVPORT: *(int *) info = rc->rc_privport; break; case CLSET_BACKCHANNEL: xprt = (SVCXPRT *)info; xprt_register(xprt); rc->rc_backchannel = info; break; case CLSET_TLS: rc->rc_tls = true; break; case CLSET_TLSCERTNAME: slen = strlen(info) + 1; /* * tlscertname with "key.pem" appended to it forms a file * name. As such, the maximum allowable strlen(info) is * NAME_MAX - 7. However, "slen" includes the nul termination * byte so it can be up to NAME_MAX - 6. */ if (slen <= 1 || slen > NAME_MAX - 6) return (FALSE); rc->rc_tlscertname = mem_alloc(slen); strlcpy(rc->rc_tlscertname, info, slen); break; case CLSET_RECONUPCALL: upcp = (struct rpc_reconupcall *)info; rc->rc_reconcall = upcp->call; rc->rc_reconarg = upcp->arg; break; default: return (FALSE); } return (TRUE); } static void clnt_reconnect_close(CLIENT *cl) { struct rc_data *rc = (struct rc_data *)cl->cl_private; CLIENT *client; mtx_lock(&rc->rc_lock); if (rc->rc_closed) { mtx_unlock(&rc->rc_lock); return; } rc->rc_closed = TRUE; client = rc->rc_client; rc->rc_client = NULL; mtx_unlock(&rc->rc_lock); if (client) { CLNT_CLOSE(client); CLNT_RELEASE(client); } } static void clnt_reconnect_destroy(CLIENT *cl) { struct rc_data *rc = (struct rc_data *)cl->cl_private; SVCXPRT *xprt; if (rc->rc_client) CLNT_DESTROY(rc->rc_client); if (rc->rc_backchannel) { xprt = (SVCXPRT *)rc->rc_backchannel; KASSERT(xprt->xp_socket == NULL, ("clnt_reconnect_destroy: xp_socket not NULL")); xprt_unregister(xprt); SVC_RELEASE(xprt); } crfree(rc->rc_ucred); mtx_destroy(&rc->rc_lock); mem_free(rc->rc_tlscertname, 0); /* 0 ok, since arg. ignored. 
*/ mem_free(rc->rc_reconarg, 0); mem_free(rc, sizeof(*rc)); mem_free(cl, sizeof (CLIENT)); } diff --git a/sys/rpc/clnt_vc.c b/sys/rpc/clnt_vc.c index 92c216e227d1..7d22c670b017 100644 --- a/sys/rpc/clnt_vc.c +++ b/sys/rpc/clnt_vc.c @@ -1,1322 +1,1322 @@ /* $NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)clnt_tcp.c 2.2 88/08/01 4.0 RPCSRC"; static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro"; #endif #include __FBSDID("$FreeBSD$"); /* * clnt_tcp.c, Implements a TCP/IP based, client side RPC. * * Copyright (C) 1984, Sun Microsystems, Inc. * * TCP based RPC supports 'batched calls'. * A sequence of calls may be batched-up in a send buffer. The rpc call * return immediately to the client even though the call was not necessarily * sent. The batching occurs if the results' xdr routine is NULL (0) AND * the rpc timeout value is zero (see clnt.h, rpc). * * Clients should NOT casually batch calls that in fact return results; that is, * the server side should be aware that a call is batched and not produce any * return message. Batched calls that produce many result messages can * deadlock (netlock) the client and the server.... * * Now go hang yourself. 
*/ #include "opt_kern_tls.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct cmessage { struct cmsghdr cmsg; struct cmsgcred cmcred; }; static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *, rpcproc_t, struct mbuf *, struct mbuf **, struct timeval); static void clnt_vc_geterr(CLIENT *, struct rpc_err *); static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *); static void clnt_vc_abort(CLIENT *); static bool_t clnt_vc_control(CLIENT *, u_int, void *); static void clnt_vc_close(CLIENT *); static void clnt_vc_destroy(CLIENT *); static bool_t time_not_ok(struct timeval *); static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag); static void clnt_vc_dotlsupcall(void *data); -static struct clnt_ops clnt_vc_ops = { +static const struct clnt_ops clnt_vc_ops = { .cl_call = clnt_vc_call, .cl_abort = clnt_vc_abort, .cl_geterr = clnt_vc_geterr, .cl_freeres = clnt_vc_freeres, .cl_close = clnt_vc_close, .cl_destroy = clnt_vc_destroy, .cl_control = clnt_vc_control }; static void clnt_vc_upcallsdone(struct ct_data *); static int fake_wchan; /* * Create a client handle for a connection. * Default options are set, which the user can change using clnt_control()'s. * The rpc/vc package does buffering similar to stdio, so the client * must pick send and receive buffer sizes, 0 => use the default. * NB: fd is copied into a private area. * NB: The rpch->cl_auth is set null authentication. Caller may wish to * set this something more useful. * * fd should be an open socket */ CLIENT * clnt_vc_create( struct socket *so, /* open file descriptor */ struct sockaddr *raddr, /* servers address */ const rpcprog_t prog, /* program number */ const rpcvers_t vers, /* version number */ size_t sendsz, /* buffer recv size */ size_t recvsz, /* buffer send size */ int intrflag) /* interruptible */ { CLIENT *cl; /* client handle */ struct ct_data *ct = NULL; /* client handle */ struct timeval now; struct rpc_msg call_msg; static uint32_t disrupt; struct __rpc_sockinfo si; XDR xdrs; int error, interrupted, one = 1, sleep_flag; struct sockopt sopt; if (disrupt == 0) disrupt = (uint32_t)(long)raddr; cl = (CLIENT *)mem_alloc(sizeof (*cl)); ct = (struct ct_data *)mem_alloc(sizeof (*ct)); mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF); ct->ct_threads = 0; ct->ct_closing = FALSE; ct->ct_closed = FALSE; ct->ct_upcallrefs = 0; ct->ct_rcvstate = RPCRCVSTATE_NORMAL; if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) { error = soconnect(so, raddr, curthread); SOCK_LOCK(so); interrupted = 0; sleep_flag = PSOCK; if (intrflag != 0) sleep_flag |= PCATCH; while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { error = msleep(&so->so_timeo, SOCK_MTX(so), sleep_flag, "connec", 0); if (error) { if (error == EINTR || error == ERESTART) interrupted = 1; break; } } if (error == 0) { error = so->so_error; so->so_error = 0; } SOCK_UNLOCK(so); if (error) { if (!interrupted) so->so_state &= ~SS_ISCONNECTING; rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = error; goto err; } } if (!__rpc_socket2sockinfo(so, &si)) { goto err; } if (so->so_proto->pr_flags & PR_CONNREQUIRED) { bzero(&sopt, sizeof(sopt)); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_KEEPALIVE; sopt.sopt_val = &one; sopt.sopt_valsize = sizeof(one); sosetopt(so, &sopt); } if (so->so_proto->pr_protocol == 
IPPROTO_TCP) { bzero(&sopt, sizeof(sopt)); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = IPPROTO_TCP; sopt.sopt_name = TCP_NODELAY; sopt.sopt_val = &one; sopt.sopt_valsize = sizeof(one); sosetopt(so, &sopt); } ct->ct_closeit = FALSE; /* * Set up private data struct */ ct->ct_socket = so; ct->ct_wait.tv_sec = -1; ct->ct_wait.tv_usec = -1; memcpy(&ct->ct_addr, raddr, raddr->sa_len); /* * Initialize call message */ getmicrotime(&now); ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now); call_msg.rm_xid = ct->ct_xid; call_msg.rm_direction = CALL; call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION; call_msg.rm_call.cb_prog = (uint32_t)prog; call_msg.rm_call.cb_vers = (uint32_t)vers; /* * pre-serialize the static part of the call msg and stash it away */ xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE, XDR_ENCODE); if (! xdr_callhdr(&xdrs, &call_msg)) { if (ct->ct_closeit) { soclose(ct->ct_socket); } goto err; } ct->ct_mpos = XDR_GETPOS(&xdrs); XDR_DESTROY(&xdrs); ct->ct_waitchan = "rpcrecv"; ct->ct_waitflag = 0; /* * Create a client handle which uses xdrrec for serialization * and authnone for authentication. */ sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz); recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz); error = soreserve(ct->ct_socket, sendsz, recvsz); if (error != 0) { if (ct->ct_closeit) { soclose(ct->ct_socket); } goto err; } cl->cl_refs = 1; cl->cl_ops = &clnt_vc_ops; cl->cl_private = ct; cl->cl_auth = authnone_create(); SOCKBUF_LOCK(&ct->ct_socket->so_rcv); soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct); SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv); ct->ct_raw = NULL; ct->ct_record = NULL; ct->ct_record_resid = 0; ct->ct_sslrefno = 0; TAILQ_INIT(&ct->ct_pending); return (cl); err: mtx_destroy(&ct->ct_lock); mem_free(ct, sizeof (struct ct_data)); mem_free(cl, sizeof (CLIENT)); return ((CLIENT *)NULL); } static enum clnt_stat clnt_vc_call( CLIENT *cl, /* client handle */ struct rpc_callextra *ext, /* call metadata */ rpcproc_t proc, /* procedure number */ struct mbuf *args, /* pointer to args */ struct mbuf **resultsp, /* pointer to results */ struct timeval utimeout) { struct ct_data *ct = (struct ct_data *) cl->cl_private; AUTH *auth; struct rpc_err *errp; enum clnt_stat stat; XDR xdrs; struct rpc_msg reply_msg; bool_t ok; int nrefreshes = 2; /* number of times to refresh cred */ struct timeval timeout; uint32_t xid; struct mbuf *mreq = NULL, *results; struct ct_request *cr; int error, maxextsiz, trycnt; #ifdef KERN_TLS u_int maxlen; #endif cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK); mtx_lock(&ct->ct_lock); if (ct->ct_closing || ct->ct_closed) { mtx_unlock(&ct->ct_lock); free(cr, M_RPC); return (RPC_CANTSEND); } ct->ct_threads++; if (ext) { auth = ext->rc_auth; errp = &ext->rc_err; } else { auth = cl->cl_auth; errp = &ct->ct_error; } cr->cr_mrep = NULL; cr->cr_error = 0; if (ct->ct_wait.tv_usec == -1) { timeout = utimeout; /* use supplied timeout */ } else { timeout = ct->ct_wait; /* use default timeout */ } /* * After 15sec of looping, allow it to return RPC_CANTSEND, which will * cause the clnt_reconnect layer to create a new TCP connection. */ trycnt = 15 * hz; call_again: mtx_assert(&ct->ct_lock, MA_OWNED); if (ct->ct_closing || ct->ct_closed) { ct->ct_threads--; wakeup(ct); mtx_unlock(&ct->ct_lock); free(cr, M_RPC); return (RPC_CANTSEND); } ct->ct_xid++; xid = ct->ct_xid; mtx_unlock(&ct->ct_lock); /* * Leave space to pre-pend the record mark. 
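 * Bumping m_data by sizeof(uint32_t) before copying in the pre-built
 * call header leaves four bytes of headroom in the same mbuf, so the
 * later M_PREPEND() of the record mark can just move m_data back rather
 * than allocating a separate mbuf.  Rough layout (illustrative only):
 *
 *	| 4 byte gap | call header | proc | cred/verf | args ... |
 *	              ^ m_data until M_PREPEND() pulls it back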
*/ mreq = m_gethdr(M_WAITOK, MT_DATA); mreq->m_data += sizeof(uint32_t); KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN, ("RPC header too big")); bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos); mreq->m_len = ct->ct_mpos; /* * The XID is the first thing in the request. */ *mtod(mreq, uint32_t *) = htonl(xid); xdrmbuf_create(&xdrs, mreq, XDR_ENCODE); errp->re_status = stat = RPC_SUCCESS; if ((! XDR_PUTINT32(&xdrs, &proc)) || (! AUTH_MARSHALL(auth, xid, &xdrs, m_copym(args, 0, M_COPYALL, M_WAITOK)))) { errp->re_status = stat = RPC_CANTENCODEARGS; mtx_lock(&ct->ct_lock); goto out; } mreq->m_pkthdr.len = m_length(mreq, NULL); /* * Prepend a record marker containing the packet length. */ M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK); *mtod(mreq, uint32_t *) = htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t))); cr->cr_xid = xid; mtx_lock(&ct->ct_lock); /* * Check to see if the other end has already started to close down * the connection. The upcall will have set ct_error.re_status * to RPC_CANTRECV if this is the case. * If the other end starts to close down the connection after this * point, it will be detected later when cr_error is checked, * since the request is in the ct_pending queue. */ if (ct->ct_error.re_status == RPC_CANTRECV) { if (errp != &ct->ct_error) { errp->re_errno = ct->ct_error.re_errno; errp->re_status = RPC_CANTRECV; } stat = RPC_CANTRECV; goto out; } /* For TLS, wait for an upcall to be done, as required. */ while ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL | RPCRCVSTATE_NONAPPDATA)) == 0) msleep(&ct->ct_rcvstate, &ct->ct_lock, 0, "rpcrcvst", hz); TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link); mtx_unlock(&ct->ct_lock); if (ct->ct_sslrefno != 0) { /* * Copy the mbuf chain to a chain of ext_pgs mbuf(s) * as required by KERN_TLS. */ maxextsiz = TLS_MAX_MSG_SIZE_V10_2; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) maxextsiz = min(maxextsiz, maxlen); #endif mreq = _rpc_copym_into_ext_pgs(mreq, maxextsiz); } /* * sosend consumes mreq. */ error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread); mreq = NULL; if (error == EMSGSIZE || (error == ERESTART && (ct->ct_waitflag & PCATCH) == 0 && trycnt-- > 0)) { SOCKBUF_LOCK(&ct->ct_socket->so_snd); sbwait(&ct->ct_socket->so_snd); SOCKBUF_UNLOCK(&ct->ct_socket->so_snd); AUTH_VALIDATE(auth, xid, NULL, NULL); mtx_lock(&ct->ct_lock); TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); /* Sleep for 1 clock tick before trying the sosend() again. */ msleep(&fake_wchan, &ct->ct_lock, 0, "rpclpsnd", 1); goto call_again; } reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL; reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf; reply_msg.acpted_rply.ar_verf.oa_length = 0; reply_msg.acpted_rply.ar_results.where = NULL; reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void; mtx_lock(&ct->ct_lock); if (error) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_errno = error; errp->re_status = stat = RPC_CANTSEND; goto out; } /* * Check to see if we got an upcall while waiting for the * lock. In both these cases, the request has been removed * from ct->ct_pending. 
*/ if (cr->cr_error) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_errno = cr->cr_error; errp->re_status = stat = RPC_CANTRECV; goto out; } if (cr->cr_mrep) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); goto got_reply; } /* * Hack to provide rpc-based message passing */ if (timeout.tv_sec == 0 && timeout.tv_usec == 0) { TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); errp->re_status = stat = RPC_TIMEDOUT; goto out; } error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan, tvtohz(&timeout)); TAILQ_REMOVE(&ct->ct_pending, cr, cr_link); if (error) { /* * The sleep returned an error so our request is still * on the list. Turn the error code into an * appropriate client status. */ errp->re_errno = error; switch (error) { case EINTR: stat = RPC_INTR; break; case EWOULDBLOCK: stat = RPC_TIMEDOUT; break; default: stat = RPC_CANTRECV; } errp->re_status = stat; goto out; } else { /* * We were woken up by the upcall. If the * upcall had a receive error, report that, * otherwise we have a reply. */ if (cr->cr_error) { errp->re_errno = cr->cr_error; errp->re_status = stat = RPC_CANTRECV; goto out; } } got_reply: /* * Now decode and validate the response. We need to drop the * lock since xdr_replymsg may end up sleeping in malloc. */ mtx_unlock(&ct->ct_lock); if (ext && ext->rc_feedback) ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg); xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE); ok = xdr_replymsg(&xdrs, &reply_msg); cr->cr_mrep = NULL; if (ok) { if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) && (reply_msg.acpted_rply.ar_stat == SUCCESS)) errp->re_status = stat = RPC_SUCCESS; else stat = _seterr_reply(&reply_msg, errp); if (stat == RPC_SUCCESS) { results = xdrmbuf_getall(&xdrs); if (!AUTH_VALIDATE(auth, xid, &reply_msg.acpted_rply.ar_verf, &results)) { errp->re_status = stat = RPC_AUTHERROR; errp->re_why = AUTH_INVALIDRESP; } else { KASSERT(results, ("auth validated but no result")); *resultsp = results; } } /* end successful completion */ /* * If unsuccessful AND error is an authentication error * then refresh credentials and try again, else break */ else if (stat == RPC_AUTHERROR) /* maybe our credentials need to be refreshed ... 
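 * (for example, an expired Kerberos ticket).  A condensed sketch of
 * the retry pattern implemented just below; nrefreshes starts at 2,
 * so the credentials are refreshed at most twice per call:
 *
 *	if (nrefreshes > 0 && AUTH_REFRESH(auth, &reply_msg)) {
 *		nrefreshes--;
 *		goto call_again;	// re-serialize and re-send
 *	}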
*/ if (nrefreshes > 0 && AUTH_REFRESH(auth, &reply_msg)) { nrefreshes--; XDR_DESTROY(&xdrs); mtx_lock(&ct->ct_lock); goto call_again; } /* end of unsuccessful completion */ } /* end of valid reply message */ else { errp->re_status = stat = RPC_CANTDECODERES; } XDR_DESTROY(&xdrs); mtx_lock(&ct->ct_lock); out: mtx_assert(&ct->ct_lock, MA_OWNED); KASSERT(stat != RPC_SUCCESS || *resultsp, ("RPC_SUCCESS without reply")); if (mreq) m_freem(mreq); if (cr->cr_mrep) m_freem(cr->cr_mrep); ct->ct_threads--; if (ct->ct_closing) wakeup(ct); mtx_unlock(&ct->ct_lock); if (auth && stat != RPC_SUCCESS) AUTH_VALIDATE(auth, xid, NULL, NULL); free(cr, M_RPC); return (stat); } static void clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp) { struct ct_data *ct = (struct ct_data *) cl->cl_private; *errp = ct->ct_error; } static bool_t clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr) { XDR xdrs; bool_t dummy; xdrs.x_op = XDR_FREE; dummy = (*xdr_res)(&xdrs, res_ptr); return (dummy); } /*ARGSUSED*/ static void clnt_vc_abort(CLIENT *cl) { } static bool_t clnt_vc_control(CLIENT *cl, u_int request, void *info) { struct ct_data *ct = (struct ct_data *)cl->cl_private; void *infop = info; SVCXPRT *xprt; uint64_t *p; int error; static u_int thrdnum = 0; mtx_lock(&ct->ct_lock); switch (request) { case CLSET_FD_CLOSE: ct->ct_closeit = TRUE; mtx_unlock(&ct->ct_lock); return (TRUE); case CLSET_FD_NCLOSE: ct->ct_closeit = FALSE; mtx_unlock(&ct->ct_lock); return (TRUE); default: break; } /* for other requests which use info */ if (info == NULL) { mtx_unlock(&ct->ct_lock); return (FALSE); } switch (request) { case CLSET_TIMEOUT: if (time_not_ok((struct timeval *)info)) { mtx_unlock(&ct->ct_lock); return (FALSE); } ct->ct_wait = *(struct timeval *)infop; break; case CLGET_TIMEOUT: *(struct timeval *)infop = ct->ct_wait; break; case CLGET_SERVER_ADDR: (void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len); break; case CLGET_SVC_ADDR: /* * Slightly different semantics to userland - we use * sockaddr instead of netbuf. */ memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len); break; case CLSET_SVC_ADDR: /* set to new address */ mtx_unlock(&ct->ct_lock); return (FALSE); case CLGET_XID: *(uint32_t *)info = ct->ct_xid; break; case CLSET_XID: /* This will set the xid of the NEXT call */ /* decrement by 1 as clnt_vc_call() increments once */ ct->ct_xid = *(uint32_t *)info - 1; break; case CLGET_VERS: /* * This RELIES on the information that, in the call body, * the version number field is the fifth field from the * beginning of the RPC header. MUST be changed if the * call_struct is changed */ *(uint32_t *)info = ntohl(*(uint32_t *)(void *)(ct->ct_mcallc + 4 * BYTES_PER_XDR_UNIT)); break; case CLSET_VERS: *(uint32_t *)(void *)(ct->ct_mcallc + 4 * BYTES_PER_XDR_UNIT) = htonl(*(uint32_t *)info); break; case CLGET_PROG: /* * This RELIES on the information that, in the call body, * the program number field is the fourth field from the * beginning of the RPC header. 
MUST be changed if the * call_struct is changed */ *(uint32_t *)info = ntohl(*(uint32_t *)(void *)(ct->ct_mcallc + 3 * BYTES_PER_XDR_UNIT)); break; case CLSET_PROG: *(uint32_t *)(void *)(ct->ct_mcallc + 3 * BYTES_PER_XDR_UNIT) = htonl(*(uint32_t *)info); break; case CLSET_WAITCHAN: ct->ct_waitchan = (const char *)info; break; case CLGET_WAITCHAN: *(const char **) info = ct->ct_waitchan; break; case CLSET_INTERRUPTIBLE: if (*(int *) info) ct->ct_waitflag = PCATCH; else ct->ct_waitflag = 0; break; case CLGET_INTERRUPTIBLE: if (ct->ct_waitflag) *(int *) info = TRUE; else *(int *) info = FALSE; break; case CLSET_BACKCHANNEL: xprt = (SVCXPRT *)info; if (ct->ct_backchannelxprt == NULL) { xprt->xp_p2 = ct; if (ct->ct_sslrefno != 0) xprt->xp_tls = RPCTLS_FLAGS_HANDSHAKE; ct->ct_backchannelxprt = xprt; } break; case CLSET_TLS: p = (uint64_t *)info; ct->ct_sslsec = *p++; ct->ct_sslusec = *p++; ct->ct_sslrefno = *p; if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) { mtx_unlock(&ct->ct_lock); /* Start the kthread that handles upcalls. */ error = kthread_add(clnt_vc_dotlsupcall, ct, NULL, NULL, 0, 0, "krpctls%u", thrdnum++); if (error != 0) panic("Can't add KRPC thread error %d", error); } else mtx_unlock(&ct->ct_lock); return (TRUE); case CLSET_BLOCKRCV: if (*(int *) info) { ct->ct_rcvstate &= ~RPCRCVSTATE_NORMAL; ct->ct_rcvstate |= RPCRCVSTATE_TLSHANDSHAKE; } else { ct->ct_rcvstate &= ~RPCRCVSTATE_TLSHANDSHAKE; ct->ct_rcvstate |= RPCRCVSTATE_NORMAL; } break; default: mtx_unlock(&ct->ct_lock); return (FALSE); } mtx_unlock(&ct->ct_lock); return (TRUE); } static void clnt_vc_close(CLIENT *cl) { struct ct_data *ct = (struct ct_data *) cl->cl_private; struct ct_request *cr; mtx_lock(&ct->ct_lock); if (ct->ct_closed) { mtx_unlock(&ct->ct_lock); return; } if (ct->ct_closing) { while (ct->ct_closing) msleep(ct, &ct->ct_lock, 0, "rpcclose", 0); KASSERT(ct->ct_closed, ("client should be closed")); mtx_unlock(&ct->ct_lock); return; } if (ct->ct_socket) { ct->ct_closing = TRUE; mtx_unlock(&ct->ct_lock); SOCKBUF_LOCK(&ct->ct_socket->so_rcv); if (ct->ct_socket->so_rcv.sb_upcall != NULL) { soupcall_clear(ct->ct_socket, SO_RCV); clnt_vc_upcallsdone(ct); } SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv); /* * Abort any pending requests and wait until everyone * has finished with clnt_vc_call. */ mtx_lock(&ct->ct_lock); TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) { cr->cr_xid = 0; cr->cr_error = ESHUTDOWN; wakeup(cr); } while (ct->ct_threads) msleep(ct, &ct->ct_lock, 0, "rpcclose", 0); } ct->ct_closing = FALSE; ct->ct_closed = TRUE; wakeup(&ct->ct_sslrefno); mtx_unlock(&ct->ct_lock); wakeup(ct); } static void clnt_vc_destroy(CLIENT *cl) { struct ct_data *ct = (struct ct_data *) cl->cl_private; struct socket *so = NULL; SVCXPRT *xprt; enum clnt_stat stat; uint32_t reterr; clnt_vc_close(cl); mtx_lock(&ct->ct_lock); xprt = ct->ct_backchannelxprt; ct->ct_backchannelxprt = NULL; if (xprt != NULL) { mtx_unlock(&ct->ct_lock); /* To avoid a LOR. */ sx_xlock(&xprt->xp_lock); mtx_lock(&ct->ct_lock); xprt->xp_p2 = NULL; sx_xunlock(&xprt->xp_lock); } if (ct->ct_socket) { if (ct->ct_closeit) { so = ct->ct_socket; } } /* Wait for the upcall kthread to terminate. */ while ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLTHREAD) != 0) msleep(&ct->ct_sslrefno, &ct->ct_lock, 0, "clntvccl", hz); mtx_unlock(&ct->ct_lock); mtx_destroy(&ct->ct_lock); if (so) { if (ct->ct_sslrefno != 0) { /* * If the TLS handshake is in progress, the upcall * will fail, but the socket should be closed by the * daemon, since the connect upcall has just failed. 
*/ if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) { /* * If the upcall fails, the socket has * probably been closed via the rpctlscd * daemon having crashed or been * restarted, so ignore return stat. */ stat = rpctls_cl_disconnect(ct->ct_sslsec, ct->ct_sslusec, ct->ct_sslrefno, &reterr); } /* Must sorele() to get rid of reference. */ CURVNET_SET(so->so_vnet); SOCK_LOCK(so); sorele(so); CURVNET_RESTORE(); } else { soshutdown(so, SHUT_WR); soclose(so); } } m_freem(ct->ct_record); m_freem(ct->ct_raw); mem_free(ct, sizeof(struct ct_data)); if (cl->cl_netid && cl->cl_netid[0]) mem_free(cl->cl_netid, strlen(cl->cl_netid) +1); if (cl->cl_tp && cl->cl_tp[0]) mem_free(cl->cl_tp, strlen(cl->cl_tp) +1); mem_free(cl, sizeof(CLIENT)); } /* * Make sure that the time is not garbage. -1 value is disallowed. * Note this is different from time_not_ok in clnt_dg.c */ static bool_t time_not_ok(struct timeval *t) { return (t->tv_sec <= -1 || t->tv_sec > 100000000 || t->tv_usec <= -1 || t->tv_usec > 1000000); } int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag) { struct ct_data *ct = (struct ct_data *) arg; struct uio uio; struct mbuf *m, *m2, **ctrlp; struct ct_request *cr; int error, rcvflag, foundreq; uint32_t xid_plus_direction[2], header; SVCXPRT *xprt; struct cf_conn *cd; u_int rawlen; struct cmsghdr *cmsg; struct tls_get_record tgr; /* * RPC-over-TLS needs to block reception during * upcalls since the upcall will be doing I/O on * the socket via openssl library calls. */ mtx_lock(&ct->ct_lock); if ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL | RPCRCVSTATE_NONAPPDATA)) == 0) { /* Mark that a socket upcall needs to be done. */ if ((ct->ct_rcvstate & (RPCRCVSTATE_UPCALLNEEDED | RPCRCVSTATE_UPCALLINPROG)) != 0) ct->ct_rcvstate |= RPCRCVSTATE_SOUPCALLNEEDED; mtx_unlock(&ct->ct_lock); return (SU_OK); } mtx_unlock(&ct->ct_lock); /* * If another thread is already here, it must be in * soreceive(), so just return to avoid races with it. * ct_upcallrefs is protected by the SOCKBUF_LOCK(), * which is held in this function, except when * soreceive() is called. */ if (ct->ct_upcallrefs > 0) return (SU_OK); ct->ct_upcallrefs++; /* * Read as much as possible off the socket and link it * onto ct_raw. */ for (;;) { uio.uio_resid = 1000000000; uio.uio_td = curthread; m2 = m = NULL; rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK; if (ct->ct_sslrefno != 0 && (ct->ct_rcvstate & RPCRCVSTATE_NORMAL) != 0) { rcvflag |= MSG_TLSAPPDATA; ctrlp = NULL; } else ctrlp = &m2; SOCKBUF_UNLOCK(&so->so_rcv); error = soreceive(so, NULL, &uio, &m, ctrlp, &rcvflag); SOCKBUF_LOCK(&so->so_rcv); if (error == EWOULDBLOCK) { /* * We must re-test for readability after * taking the lock to protect us in the case * where a new packet arrives on the socket * after our call to soreceive fails with * EWOULDBLOCK. */ error = 0; if (!soreadable(so)) break; continue; } if (error == 0 && m == NULL) { /* * We must have got EOF trying * to read from the stream. */ error = ECONNRESET; } /* * A return of ENXIO indicates that there is a * non-application data record at the head of the * socket's receive queue, for TLS connections. * This record needs to be handled in userland * via an SSL_read() call, so do an upcall to the daemon. */ if (ct->ct_sslrefno != 0 && error == ENXIO) { /* Disable reception, marking an upcall needed. */ mtx_lock(&ct->ct_lock); ct->ct_rcvstate |= RPCRCVSTATE_UPCALLNEEDED; /* * If an upcall in needed, wake up the kthread * that runs clnt_vc_dotlsupcall(). 
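 *
 * A simplified sketch of the hand-off between this socket upcall,
 * the clnt_vc_dotlsupcall() kthread and the sender; the flag names
 * are the real RPCRCVSTATE_* bits used in this file, but the
 * sequencing shown is only a paraphrase:
 *
 *	soupcall: ct_rcvstate |= RPCRCVSTATE_UPCALLNEEDED;
 *	          wakeup(&ct->ct_sslrefno);
 *	kthread:  rpctls_cl_handlerecord(...);
 *	          ct_rcvstate |= RPCRCVSTATE_NORMAL;  // or _NONAPPDATA
 *	          wakeup(&ct->ct_rcvstate);
 *	sender:   msleep(&ct->ct_rcvstate, ...) until NORMAL or
 *	          NONAPPDATA is set, then queues the request.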
*/ wakeup(&ct->ct_sslrefno); mtx_unlock(&ct->ct_lock); break; } if (error != 0) break; /* Process any record header(s). */ if (m2 != NULL) { cmsg = mtod(m2, struct cmsghdr *); if (cmsg->cmsg_type == TLS_GET_RECORD && cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) { memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr)); /* * This should have been handled by * setting RPCRCVSTATE_UPCALLNEEDED in * ct_rcvstate but if not, all we can do * is toss it away. */ if (tgr.tls_type != TLS_RLTYPE_APP) { m_freem(m); m_free(m2); mtx_lock(&ct->ct_lock); ct->ct_rcvstate &= ~RPCRCVSTATE_NONAPPDATA; ct->ct_rcvstate |= RPCRCVSTATE_NORMAL; mtx_unlock(&ct->ct_lock); continue; } } m_free(m2); } if (ct->ct_raw != NULL) m_last(ct->ct_raw)->m_next = m; else ct->ct_raw = m; } rawlen = m_length(ct->ct_raw, NULL); /* Now, process as much of ct_raw as possible. */ for (;;) { /* * If ct_record_resid is zero, we are waiting for a * record mark. */ if (ct->ct_record_resid == 0) { if (rawlen < sizeof(uint32_t)) break; m_copydata(ct->ct_raw, 0, sizeof(uint32_t), (char *)&header); header = ntohl(header); ct->ct_record_resid = header & 0x7fffffff; ct->ct_record_eor = ((header & 0x80000000) != 0); m_adj(ct->ct_raw, sizeof(uint32_t)); rawlen -= sizeof(uint32_t); } else { /* * Move as much of the record as possible to * ct_record. */ if (rawlen == 0) break; if (rawlen <= ct->ct_record_resid) { if (ct->ct_record != NULL) m_last(ct->ct_record)->m_next = ct->ct_raw; else ct->ct_record = ct->ct_raw; ct->ct_raw = NULL; ct->ct_record_resid -= rawlen; rawlen = 0; } else { m = m_split(ct->ct_raw, ct->ct_record_resid, M_NOWAIT); if (m == NULL) break; if (ct->ct_record != NULL) m_last(ct->ct_record)->m_next = ct->ct_raw; else ct->ct_record = ct->ct_raw; rawlen -= ct->ct_record_resid; ct->ct_record_resid = 0; ct->ct_raw = m; } if (ct->ct_record_resid > 0) break; /* * If we have the entire record, see if we can * match it to a request. */ if (ct->ct_record_eor) { /* * The XID is in the first uint32_t of * the reply and the message direction * is the second one. */ if (ct->ct_record->m_len < sizeof(xid_plus_direction) && m_length(ct->ct_record, NULL) < sizeof(xid_plus_direction)) { /* * What to do now? * The data in the TCP stream is * corrupted such that there is no * valid RPC message to parse. * I think it best to close this * connection and allow * clnt_reconnect_call() to try * and establish a new one. */ printf("clnt_vc_soupcall: " "connection data corrupted\n"); error = ECONNRESET; goto wakeup_all; } m_copydata(ct->ct_record, 0, sizeof(xid_plus_direction), (char *)xid_plus_direction); xid_plus_direction[0] = ntohl(xid_plus_direction[0]); xid_plus_direction[1] = ntohl(xid_plus_direction[1]); /* Check message direction. */ if (xid_plus_direction[1] == CALL) { /* This is a backchannel request. */ mtx_lock(&ct->ct_lock); xprt = ct->ct_backchannelxprt; if (xprt == NULL) { mtx_unlock(&ct->ct_lock); /* Just throw it away. */ m_freem(ct->ct_record); ct->ct_record = NULL; } else { cd = (struct cf_conn *) xprt->xp_p1; m2 = cd->mreq; /* * The requests are chained * in the m_nextpkt list. */ while (m2 != NULL && m2->m_nextpkt != NULL) /* Find end of list. */ m2 = m2->m_nextpkt; if (m2 != NULL) m2->m_nextpkt = ct->ct_record; else cd->mreq = ct->ct_record; ct->ct_record->m_nextpkt = NULL; ct->ct_record = NULL; xprt_active(xprt); mtx_unlock(&ct->ct_lock); } } else { mtx_lock(&ct->ct_lock); foundreq = 0; TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) { if (cr->cr_xid == xid_plus_direction[0]) { /* * This one * matches. We leave * the reply mbuf in * cr->cr_mrep. 
Set * the XID to zero so * that we will ignore * any duplicated * replies. */ cr->cr_xid = 0; cr->cr_mrep = ct->ct_record; cr->cr_error = 0; foundreq = 1; wakeup(cr); break; } } mtx_unlock(&ct->ct_lock); if (!foundreq) m_freem(ct->ct_record); ct->ct_record = NULL; } } } } if (error != 0) { wakeup_all: /* * This socket is broken, so mark that it cannot * receive and fail all RPCs waiting for a reply * on it, so that they will be retried on a new * TCP connection created by clnt_reconnect_X(). */ mtx_lock(&ct->ct_lock); ct->ct_error.re_status = RPC_CANTRECV; ct->ct_error.re_errno = error; TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) { cr->cr_error = error; wakeup(cr); } mtx_unlock(&ct->ct_lock); } ct->ct_upcallrefs--; if (ct->ct_upcallrefs < 0) panic("rpcvc upcall refcnt"); if (ct->ct_upcallrefs == 0) wakeup(&ct->ct_upcallrefs); return (SU_OK); } /* * Wait for all upcalls in progress to complete. */ static void clnt_vc_upcallsdone(struct ct_data *ct) { SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv); while (ct->ct_upcallrefs > 0) (void) msleep(&ct->ct_upcallrefs, SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0); } /* * Do a TLS upcall to the rpctlscd daemon, as required. * This function runs as a kthread. */ static void clnt_vc_dotlsupcall(void *data) { struct ct_data *ct = (struct ct_data *)data; enum clnt_stat ret; uint32_t reterr; mtx_lock(&ct->ct_lock); ct->ct_rcvstate |= RPCRCVSTATE_UPCALLTHREAD; while (!ct->ct_closed) { if ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLNEEDED) != 0) { ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLNEEDED; ct->ct_rcvstate |= RPCRCVSTATE_UPCALLINPROG; if (ct->ct_sslrefno != 0 && ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) { mtx_unlock(&ct->ct_lock); ret = rpctls_cl_handlerecord(ct->ct_sslsec, ct->ct_sslusec, ct->ct_sslrefno, &reterr); mtx_lock(&ct->ct_lock); } ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLINPROG; if (ret == RPC_SUCCESS && reterr == RPCTLSERR_OK) ct->ct_rcvstate |= RPCRCVSTATE_NORMAL; else ct->ct_rcvstate |= RPCRCVSTATE_NONAPPDATA; wakeup(&ct->ct_rcvstate); } if ((ct->ct_rcvstate & RPCRCVSTATE_SOUPCALLNEEDED) != 0) { ct->ct_rcvstate &= ~RPCRCVSTATE_SOUPCALLNEEDED; mtx_unlock(&ct->ct_lock); SOCKBUF_LOCK(&ct->ct_socket->so_rcv); clnt_vc_soupcall(ct->ct_socket, ct, M_NOWAIT); SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv); mtx_lock(&ct->ct_lock); } msleep(&ct->ct_sslrefno, &ct->ct_lock, 0, "clntvcdu", hz); } ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLTHREAD; wakeup(&ct->ct_sslrefno); mtx_unlock(&ct->ct_lock); kthread_exit(); } diff --git a/sys/rpc/rpcsec_gss/rpcsec_gss.c b/sys/rpc/rpcsec_gss/rpcsec_gss.c index b384a8347ef5..32121be21091 100644 --- a/sys/rpc/rpcsec_gss/rpcsec_gss.c +++ b/sys/rpc/rpcsec_gss/rpcsec_gss.c @@ -1,1183 +1,1183 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2008 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* auth_gss.c RPCSEC_GSS client routines. Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
$Id: auth_gss.c,v 1.32 2002/01/15 15:43:00 andros Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rpcsec_gss_int.h" static void rpc_gss_nextverf(AUTH*); static bool_t rpc_gss_marshal(AUTH *, uint32_t, XDR *, struct mbuf *); static bool_t rpc_gss_init(AUTH *auth, rpc_gss_options_ret_t *options_ret); static bool_t rpc_gss_refresh(AUTH *, void *); static bool_t rpc_gss_validate(AUTH *, uint32_t, struct opaque_auth *, struct mbuf **); static void rpc_gss_destroy(AUTH *); static void rpc_gss_destroy_context(AUTH *, bool_t); -static struct auth_ops rpc_gss_ops = { +static const struct auth_ops rpc_gss_ops = { .ah_nextverf = rpc_gss_nextverf, .ah_marshal = rpc_gss_marshal, .ah_validate = rpc_gss_validate, .ah_refresh = rpc_gss_refresh, .ah_destroy = rpc_gss_destroy, }; enum rpcsec_gss_state { RPCSEC_GSS_START, RPCSEC_GSS_CONTEXT, RPCSEC_GSS_ESTABLISHED, RPCSEC_GSS_DESTROYING }; struct rpc_pending_request { uint32_t pr_xid; /* XID of rpc */ uint32_t pr_seq; /* matching GSS seq */ LIST_ENTRY(rpc_pending_request) pr_link; }; LIST_HEAD(rpc_pending_request_list, rpc_pending_request); struct rpc_gss_data { volatile u_int gd_refs; /* number of current users */ struct mtx gd_lock; uint32_t gd_hash; AUTH *gd_auth; /* link back to AUTH */ struct ucred *gd_ucred; /* matching local cred */ char *gd_principal; /* server principal name */ char *gd_clntprincipal; /* client principal name */ rpc_gss_options_req_t gd_options; /* GSS context options */ enum rpcsec_gss_state gd_state; /* connection state */ gss_buffer_desc gd_verf; /* save GSS_S_COMPLETE * NULL RPC verfier to * process at end of * context negotiation */ CLIENT *gd_clnt; /* client handle */ gss_OID gd_mech; /* mechanism to use */ gss_qop_t gd_qop; /* quality of protection */ gss_ctx_id_t gd_ctx; /* context id */ struct rpc_gss_cred gd_cred; /* client credentials */ uint32_t gd_seq; /* next sequence number */ u_int gd_win; /* sequence window */ struct rpc_pending_request_list gd_reqs; TAILQ_ENTRY(rpc_gss_data) gd_link; TAILQ_ENTRY(rpc_gss_data) gd_alllink; }; TAILQ_HEAD(rpc_gss_data_list, rpc_gss_data); #define AUTH_PRIVATE(auth) ((struct rpc_gss_data *)auth->ah_private) static struct timeval AUTH_TIMEOUT = { 25, 0 }; #define RPC_GSS_HASH_SIZE 11 #define RPC_GSS_MAX 256 static struct rpc_gss_data_list rpc_gss_cache[RPC_GSS_HASH_SIZE]; static struct rpc_gss_data_list rpc_gss_all; static struct sx rpc_gss_lock; static int rpc_gss_count; static AUTH *rpc_gss_seccreate_int(CLIENT *, struct ucred *, const char *, const char *, gss_OID, rpc_gss_service_t, u_int, rpc_gss_options_req_t *, rpc_gss_options_ret_t *); static void rpc_gss_hashinit(void *dummy) { int i; for (i = 0; i < RPC_GSS_HASH_SIZE; i++) TAILQ_INIT(&rpc_gss_cache[i]); TAILQ_INIT(&rpc_gss_all); sx_init(&rpc_gss_lock, "rpc_gss_lock"); } SYSINIT(rpc_gss_hashinit, SI_SUB_KMEM, SI_ORDER_ANY, rpc_gss_hashinit, NULL); static uint32_t rpc_gss_hash(const char *principal, gss_OID mech, struct ucred *cred, rpc_gss_service_t service) { uint32_t h; h = HASHSTEP(HASHINIT, cred->cr_uid); h = hash32_str(principal, h); h = hash32_buf(mech->elements, mech->length, h); h = HASHSTEP(h, (int) service); return (h % RPC_GSS_HASH_SIZE); } /* * Simplified interface to create a security association for the * current thread's * ucred. 
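 *
 * Cached associations are keyed on (uid, server principal, mechanism
 * OID, service), hashed into rpc_gss_cache[] and kept LRU-ordered on
 * rpc_gss_all.  A hedged usage sketch from a hypothetical caller;
 * "clnt" and "cred" are assumed to exist already:
 *
 *	gss_OID oid;
 *	AUTH *auth;
 *	if (rpc_gss_mech_to_oid("kerberosv5", &oid)) {
 *		auth = rpc_gss_secfind(clnt, cred,
 *		    "nfs@server.example.com", oid, rpc_gss_svc_integrity);
 *		if (auth != NULL)
 *			clnt->cl_auth = auth;
 *	}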
*/ AUTH * rpc_gss_secfind(CLIENT *clnt, struct ucred *cred, const char *principal, gss_OID mech_oid, rpc_gss_service_t service) { uint32_t h, th; AUTH *auth; struct rpc_gss_data *gd, *tgd; rpc_gss_options_ret_t options; if (rpc_gss_count > RPC_GSS_MAX) { while (rpc_gss_count > RPC_GSS_MAX) { sx_xlock(&rpc_gss_lock); tgd = TAILQ_FIRST(&rpc_gss_all); th = tgd->gd_hash; TAILQ_REMOVE(&rpc_gss_cache[th], tgd, gd_link); TAILQ_REMOVE(&rpc_gss_all, tgd, gd_alllink); rpc_gss_count--; sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(tgd->gd_auth); } } /* * See if we already have an AUTH which matches. */ h = rpc_gss_hash(principal, mech_oid, cred, service); again: sx_slock(&rpc_gss_lock); TAILQ_FOREACH(gd, &rpc_gss_cache[h], gd_link) { if (gd->gd_ucred->cr_uid == cred->cr_uid && !strcmp(gd->gd_principal, principal) && gd->gd_mech == mech_oid && gd->gd_cred.gc_svc == service) { refcount_acquire(&gd->gd_refs); if (sx_try_upgrade(&rpc_gss_lock)) { /* * Keep rpc_gss_all LRU sorted. */ TAILQ_REMOVE(&rpc_gss_all, gd, gd_alllink); TAILQ_INSERT_TAIL(&rpc_gss_all, gd, gd_alllink); sx_xunlock(&rpc_gss_lock); } else { sx_sunlock(&rpc_gss_lock); } /* * If the state != ESTABLISHED, try and initialize * the authenticator again. This will happen if the * user's credentials have expired. It may succeed now, * if they have done a kinit or similar. */ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { memset(&options, 0, sizeof (options)); (void) rpc_gss_init(gd->gd_auth, &options); } return (gd->gd_auth); } } sx_sunlock(&rpc_gss_lock); /* * We missed in the cache - create a new association. */ auth = rpc_gss_seccreate_int(clnt, cred, NULL, principal, mech_oid, service, GSS_C_QOP_DEFAULT, NULL, NULL); if (!auth) return (NULL); gd = AUTH_PRIVATE(auth); gd->gd_hash = h; sx_xlock(&rpc_gss_lock); TAILQ_FOREACH(tgd, &rpc_gss_cache[h], gd_link) { if (tgd->gd_ucred->cr_uid == cred->cr_uid && !strcmp(tgd->gd_principal, principal) && tgd->gd_mech == mech_oid && tgd->gd_cred.gc_svc == service) { /* * We lost a race to create the AUTH that * matches this cred. */ sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(auth); goto again; } } rpc_gss_count++; TAILQ_INSERT_TAIL(&rpc_gss_cache[h], gd, gd_link); TAILQ_INSERT_TAIL(&rpc_gss_all, gd, gd_alllink); refcount_acquire(&gd->gd_refs); /* one for the cache, one for user */ sx_xunlock(&rpc_gss_lock); return (auth); } void rpc_gss_secpurge(CLIENT *clnt) { uint32_t h; struct rpc_gss_data *gd, *tgd; TAILQ_FOREACH_SAFE(gd, &rpc_gss_all, gd_alllink, tgd) { if (gd->gd_clnt == clnt) { sx_xlock(&rpc_gss_lock); h = gd->gd_hash; TAILQ_REMOVE(&rpc_gss_cache[h], gd, gd_link); TAILQ_REMOVE(&rpc_gss_all, gd, gd_alllink); rpc_gss_count--; sx_xunlock(&rpc_gss_lock); AUTH_DESTROY(gd->gd_auth); } } } AUTH * rpc_gss_seccreate(CLIENT *clnt, struct ucred *cred, const char *clnt_principal, const char *principal, const char *mechanism, rpc_gss_service_t service, const char *qop, rpc_gss_options_req_t *options_req, rpc_gss_options_ret_t *options_ret) { gss_OID oid; u_int qop_num; /* * Bail out now if we don't know this mechanism. */ if (!rpc_gss_mech_to_oid(mechanism, &oid)) return (NULL); if (qop) { if (!rpc_gss_qop_to_num(qop, mechanism, &qop_num)) return (NULL); } else { qop_num = GSS_C_QOP_DEFAULT; } return (rpc_gss_seccreate_int(clnt, cred, clnt_principal, principal, oid, service, qop_num, options_req, options_ret)); } void rpc_gss_refresh_auth(AUTH *auth) { struct rpc_gss_data *gd; rpc_gss_options_ret_t options; gd = AUTH_PRIVATE(auth); /* * If the state != ESTABLISHED, try and initialize * the authenticator again. 
This will happen if the * user's credentials have expired. It may succeed now, * if they have done a kinit or similar. */ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { memset(&options, 0, sizeof (options)); (void) rpc_gss_init(auth, &options); } } static AUTH * rpc_gss_seccreate_int(CLIENT *clnt, struct ucred *cred, const char *clnt_principal, const char *principal, gss_OID mech_oid, rpc_gss_service_t service, u_int qop_num, rpc_gss_options_req_t *options_req, rpc_gss_options_ret_t *options_ret) { AUTH *auth; rpc_gss_options_ret_t options; struct rpc_gss_data *gd; /* * If the caller doesn't want the options, point at local * storage to simplify the code below. */ if (!options_ret) options_ret = &options; /* * Default service is integrity. */ if (service == rpc_gss_svc_default) service = rpc_gss_svc_integrity; memset(options_ret, 0, sizeof(*options_ret)); rpc_gss_log_debug("in rpc_gss_seccreate()"); memset(&rpc_createerr, 0, sizeof(rpc_createerr)); auth = mem_alloc(sizeof(*auth)); if (auth == NULL) { rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = ENOMEM; return (NULL); } gd = mem_alloc(sizeof(*gd)); if (gd == NULL) { rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = ENOMEM; mem_free(auth, sizeof(*auth)); return (NULL); } auth->ah_ops = &rpc_gss_ops; auth->ah_private = (caddr_t) gd; auth->ah_cred.oa_flavor = RPCSEC_GSS; refcount_init(&gd->gd_refs, 1); mtx_init(&gd->gd_lock, "gd->gd_lock", NULL, MTX_DEF); gd->gd_auth = auth; gd->gd_ucred = crdup(cred); gd->gd_principal = strdup(principal, M_RPC); if (clnt_principal != NULL) gd->gd_clntprincipal = strdup(clnt_principal, M_RPC); else gd->gd_clntprincipal = NULL; if (options_req) { gd->gd_options = *options_req; } else { gd->gd_options.req_flags = GSS_C_MUTUAL_FLAG; gd->gd_options.time_req = 0; gd->gd_options.my_cred = GSS_C_NO_CREDENTIAL; gd->gd_options.input_channel_bindings = NULL; } CLNT_ACQUIRE(clnt); gd->gd_clnt = clnt; gd->gd_ctx = GSS_C_NO_CONTEXT; gd->gd_mech = mech_oid; gd->gd_qop = qop_num; gd->gd_cred.gc_version = RPCSEC_GSS_VERSION; gd->gd_cred.gc_proc = RPCSEC_GSS_INIT; gd->gd_cred.gc_seq = 0; gd->gd_cred.gc_svc = service; LIST_INIT(&gd->gd_reqs); if (!rpc_gss_init(auth, options_ret)) { goto bad; } return (auth); bad: AUTH_DESTROY(auth); return (NULL); } bool_t rpc_gss_set_defaults(AUTH *auth, rpc_gss_service_t service, const char *qop) { struct rpc_gss_data *gd; u_int qop_num; const char *mechanism; gd = AUTH_PRIVATE(auth); if (!rpc_gss_oid_to_mech(gd->gd_mech, &mechanism)) { return (FALSE); } if (qop) { if (!rpc_gss_qop_to_num(qop, mechanism, &qop_num)) { return (FALSE); } } else { qop_num = GSS_C_QOP_DEFAULT; } gd->gd_cred.gc_svc = service; gd->gd_qop = qop_num; return (TRUE); } static void rpc_gss_purge_xid(struct rpc_gss_data *gd, uint32_t xid) { struct rpc_pending_request *pr, *npr; struct rpc_pending_request_list reqs; LIST_INIT(&reqs); mtx_lock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &gd->gd_reqs, pr_link, npr) { if (pr->pr_xid == xid) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&reqs, pr, pr_link); } } mtx_unlock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) { mem_free(pr, sizeof(*pr)); } } static uint32_t rpc_gss_alloc_seq(struct rpc_gss_data *gd) { uint32_t seq; mtx_lock(&gd->gd_lock); seq = gd->gd_seq; gd->gd_seq++; mtx_unlock(&gd->gd_lock); return (seq); } static void rpc_gss_nextverf(__unused AUTH *auth) { /* not used */ } static bool_t rpc_gss_marshal(AUTH *auth, uint32_t xid, XDR *xdrs, struct mbuf *args) { struct rpc_gss_data *gd; struct 
rpc_pending_request *pr; uint32_t seq; XDR tmpxdrs; struct rpc_gss_cred gsscred; char credbuf[MAX_AUTH_BYTES]; struct opaque_auth creds, verf; gss_buffer_desc rpcbuf, checksum; OM_uint32 maj_stat, min_stat; bool_t xdr_stat; rpc_gss_log_debug("in rpc_gss_marshal()"); gd = AUTH_PRIVATE(auth); gsscred = gd->gd_cred; seq = rpc_gss_alloc_seq(gd); gsscred.gc_seq = seq; xdrmem_create(&tmpxdrs, credbuf, sizeof(credbuf), XDR_ENCODE); if (!xdr_rpc_gss_cred(&tmpxdrs, &gsscred)) { XDR_DESTROY(&tmpxdrs); _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } creds.oa_flavor = RPCSEC_GSS; creds.oa_base = credbuf; creds.oa_length = XDR_GETPOS(&tmpxdrs); XDR_DESTROY(&tmpxdrs); xdr_opaque_auth(xdrs, &creds); if (gd->gd_cred.gc_proc == RPCSEC_GSS_INIT || gd->gd_cred.gc_proc == RPCSEC_GSS_CONTINUE_INIT) { if (!xdr_opaque_auth(xdrs, &_null_auth)) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } xdrmbuf_append(xdrs, args); return (TRUE); } else { /* * Keep track of this XID + seq pair so that we can do * the matching gss_verify_mic in AUTH_VALIDATE. */ pr = mem_alloc(sizeof(struct rpc_pending_request)); mtx_lock(&gd->gd_lock); pr->pr_xid = xid; pr->pr_seq = seq; LIST_INSERT_HEAD(&gd->gd_reqs, pr, pr_link); mtx_unlock(&gd->gd_lock); /* * Checksum serialized RPC header, up to and including * credential. For the in-kernel environment, we * assume that our XDR stream is on a contiguous * memory buffer (e.g. an mbuf). */ rpcbuf.length = XDR_GETPOS(xdrs); XDR_SETPOS(xdrs, 0); rpcbuf.value = XDR_INLINE(xdrs, rpcbuf.length); maj_stat = gss_get_mic(&min_stat, gd->gd_ctx, gd->gd_qop, &rpcbuf, &checksum); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_get_mic", gd->gd_mech, maj_stat, min_stat); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); } _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); return (FALSE); } verf.oa_flavor = RPCSEC_GSS; verf.oa_base = checksum.value; verf.oa_length = checksum.length; xdr_stat = xdr_opaque_auth(xdrs, &verf); gss_release_buffer(&min_stat, &checksum); if (!xdr_stat) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } if (gd->gd_state != RPCSEC_GSS_ESTABLISHED || gd->gd_cred.gc_svc == rpc_gss_svc_none) { xdrmbuf_append(xdrs, args); return (TRUE); } else { if (!xdr_rpc_gss_wrap_data(&args, gd->gd_ctx, gd->gd_qop, gd->gd_cred.gc_svc, seq)) return (FALSE); xdrmbuf_append(xdrs, args); return (TRUE); } } return (TRUE); } static bool_t rpc_gss_validate(AUTH *auth, uint32_t xid, struct opaque_auth *verf, struct mbuf **resultsp) { struct rpc_gss_data *gd; struct rpc_pending_request *pr, *npr; struct rpc_pending_request_list reqs; gss_qop_t qop_state; uint32_t num, seq; gss_buffer_desc signbuf, checksum; OM_uint32 maj_stat, min_stat; rpc_gss_log_debug("in rpc_gss_validate()"); gd = AUTH_PRIVATE(auth); /* * The client will call us with a NULL verf when it gives up * on an XID. */ if (!verf) { rpc_gss_purge_xid(gd, xid); return (TRUE); } if (gd->gd_state == RPCSEC_GSS_CONTEXT) { /* * Save the on the wire verifier to validate last INIT * phase packet after decode if the major status is * GSS_S_COMPLETE. 
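 *
 * Once the context completes, rpc_gss_init() checks this saved
 * verifier: it is a MIC computed by the server over the sequence
 * window (gr_win).  A condensed sketch of that later check, using
 * the same calls that appear further down in this file:
 *
 *	uint32_t seq = htonl(gr.gr_win);
 *	gss_buffer_desc bufin;
 *	bufin.value = (unsigned char *)&seq;
 *	bufin.length = sizeof(seq);
 *	maj_stat = gss_verify_mic(&min_stat, gd->gd_ctx, &bufin,
 *	    &gd->gd_verf, &qop_state);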
*/ if (gd->gd_verf.value) xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); gd->gd_verf.value = mem_alloc(verf->oa_length); if (gd->gd_verf.value == NULL) { printf("gss_validate: out of memory\n"); _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); m_freem(*resultsp); *resultsp = NULL; return (FALSE); } memcpy(gd->gd_verf.value, verf->oa_base, verf->oa_length); gd->gd_verf.length = verf->oa_length; return (TRUE); } /* * We need to check the verifier against all the requests * we've sent for this XID - for unreliable protocols, we * retransmit with the same XID but different sequence * number. We temporarily take this set of requests out of the * list so that we can work through the list without having to * hold the lock. */ mtx_lock(&gd->gd_lock); LIST_INIT(&reqs); LIST_FOREACH_SAFE(pr, &gd->gd_reqs, pr_link, npr) { if (pr->pr_xid == xid) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&reqs, pr, pr_link); } } mtx_unlock(&gd->gd_lock); LIST_FOREACH(pr, &reqs, pr_link) { if (pr->pr_xid == xid) { seq = pr->pr_seq; num = htonl(seq); signbuf.value = &num; signbuf.length = sizeof(num); checksum.value = verf->oa_base; checksum.length = verf->oa_length; maj_stat = gss_verify_mic(&min_stat, gd->gd_ctx, &signbuf, &checksum, &qop_state); if (maj_stat != GSS_S_COMPLETE || qop_state != gd->gd_qop) { continue; } if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); break; } //rpc_gss_purge_reqs(gd, seq); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) mem_free(pr, sizeof(*pr)); if (gd->gd_cred.gc_svc == rpc_gss_svc_none) { return (TRUE); } else { if (!xdr_rpc_gss_unwrap_data(resultsp, gd->gd_ctx, gd->gd_qop, gd->gd_cred.gc_svc, seq)) { return (FALSE); } } return (TRUE); } } /* * We didn't match - put back any entries for this XID so that * a future call to validate can retry. */ mtx_lock(&gd->gd_lock); LIST_FOREACH_SAFE(pr, &reqs, pr_link, npr) { LIST_REMOVE(pr, pr_link); LIST_INSERT_HEAD(&gd->gd_reqs, pr, pr_link); } mtx_unlock(&gd->gd_lock); /* * Nothing matches - give up. */ _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); m_freem(*resultsp); *resultsp = NULL; return (FALSE); } static bool_t rpc_gss_init(AUTH *auth, rpc_gss_options_ret_t *options_ret) { struct thread *td = curthread; struct ucred *crsave; struct rpc_gss_data *gd; struct rpc_gss_init_res gr; gss_buffer_desc principal_desc; gss_buffer_desc *recv_tokenp, recv_token, send_token; gss_name_t name; OM_uint32 maj_stat, min_stat, call_stat; const char *mech; struct rpc_callextra ext; gss_OID mech_oid; gss_OID_set mechlist; rpc_gss_log_debug("in rpc_gss_refresh()"); gd = AUTH_PRIVATE(auth); mtx_lock(&gd->gd_lock); /* * If the context isn't in START state, someone else is * refreshing - we wait till they are done. If they fail, they * will put the state back to START and we can try (most * likely to also fail). */ while (gd->gd_state != RPCSEC_GSS_START && gd->gd_state != RPCSEC_GSS_ESTABLISHED) { msleep(gd, &gd->gd_lock, 0, "gssstate", 0); } if (gd->gd_state == RPCSEC_GSS_ESTABLISHED) { mtx_unlock(&gd->gd_lock); return (TRUE); } gd->gd_state = RPCSEC_GSS_CONTEXT; mtx_unlock(&gd->gd_lock); gd->gd_cred.gc_proc = RPCSEC_GSS_INIT; gd->gd_cred.gc_seq = 0; /* * For KerberosV, if there is a client principal name, that implies * that this is a host based initiator credential in the default * keytab file. For this case, it is necessary to do a * gss_acquire_cred(). When this is done, the gssd daemon will * do the equivalent of "kinit -k" to put a TGT for the name in * the credential cache file for the gssd daemon.
*/ if (gd->gd_clntprincipal != NULL && rpc_gss_mech_to_oid("kerberosv5", &mech_oid) && gd->gd_mech == mech_oid) { /* Get rid of any old credential. */ if (gd->gd_options.my_cred != GSS_C_NO_CREDENTIAL) { gss_release_cred(&min_stat, &gd->gd_options.my_cred); gd->gd_options.my_cred = GSS_C_NO_CREDENTIAL; } /* * The mechanism must be set to KerberosV for acquisition * of credentials to work reliably. */ maj_stat = gss_create_empty_oid_set(&min_stat, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } maj_stat = gss_add_oid_set_member(&min_stat, gd->gd_mech, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; gss_release_oid_set(&min_stat, &mechlist); goto out; } principal_desc.value = (void *)gd->gd_clntprincipal; principal_desc.length = strlen(gd->gd_clntprincipal); maj_stat = gss_import_name(&min_stat, &principal_desc, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; gss_release_oid_set(&min_stat, &mechlist); goto out; } /* Acquire the credentials. */ maj_stat = gss_acquire_cred(&min_stat, name, 0, mechlist, GSS_C_INITIATE, &gd->gd_options.my_cred, NULL, NULL); gss_release_name(&min_stat, &name); gss_release_oid_set(&min_stat, &mechlist); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } } principal_desc.value = (void *)gd->gd_principal; principal_desc.length = strlen(gd->gd_principal); maj_stat = gss_import_name(&min_stat, &principal_desc, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) { options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; goto out; } /* GSS context establishment loop. */ memset(&recv_token, 0, sizeof(recv_token)); memset(&gr, 0, sizeof(gr)); memset(options_ret, 0, sizeof(*options_ret)); options_ret->major_status = GSS_S_FAILURE; recv_tokenp = GSS_C_NO_BUFFER; for (;;) { crsave = td->td_ucred; td->td_ucred = gd->gd_ucred; maj_stat = gss_init_sec_context(&min_stat, gd->gd_options.my_cred, &gd->gd_ctx, name, gd->gd_mech, gd->gd_options.req_flags, gd->gd_options.time_req, gd->gd_options.input_channel_bindings, recv_tokenp, &gd->gd_mech, /* used mech */ &send_token, &options_ret->ret_flags, &options_ret->time_req); td->td_ucred = crsave; /* * Free the token which we got from the server (if * any). Remember that this was allocated by XDR, not * GSS-API. 
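 *
 * The distinction matters because each allocator pairs with its own
 * release routine.  Both calls already appear in this function; shown
 * here side by side purely for contrast:
 *
 *	xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_token);
 *		// token decoded by XDR (gr.gr_token)
 *	gss_release_buffer(&min_stat, &send_token);
 *		// token produced by gss_init_sec_context()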
*/ if (recv_tokenp != GSS_C_NO_BUFFER) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_token); recv_tokenp = GSS_C_NO_BUFFER; } if (gd->gd_mech && rpc_gss_oid_to_mech(gd->gd_mech, &mech)) { strlcpy(options_ret->actual_mechanism, mech, sizeof(options_ret->actual_mechanism)); } if (maj_stat != GSS_S_COMPLETE && maj_stat != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("gss_init_sec_context", gd->gd_mech, maj_stat, min_stat); options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; break; } if (send_token.length != 0) { memset(&gr, 0, sizeof(gr)); bzero(&ext, sizeof(ext)); ext.rc_auth = auth; call_stat = CLNT_CALL_EXT(gd->gd_clnt, &ext, NULLPROC, (xdrproc_t)xdr_gss_buffer_desc, &send_token, (xdrproc_t)xdr_rpc_gss_init_res, (caddr_t)&gr, AUTH_TIMEOUT); gss_release_buffer(&min_stat, &send_token); if (call_stat != RPC_SUCCESS) break; if (gr.gr_major != GSS_S_COMPLETE && gr.gr_major != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("server reply", gd->gd_mech, gr.gr_major, gr.gr_minor); options_ret->major_status = gr.gr_major; options_ret->minor_status = gr.gr_minor; break; } /* * Save the server's gr_handle value, freeing * what we have already (remember that this * was allocated by XDR, not GSS-API). */ if (gr.gr_handle.length != 0) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_cred.gc_handle); gd->gd_cred.gc_handle = gr.gr_handle; } /* * Save the server's token as well. */ if (gr.gr_token.length != 0) { recv_token = gr.gr_token; recv_tokenp = &recv_token; } /* * Since we have copied out all the bits of gr * which XDR allocated for us, we don't need * to free it. */ gd->gd_cred.gc_proc = RPCSEC_GSS_CONTINUE_INIT; } if (maj_stat == GSS_S_COMPLETE) { gss_buffer_desc bufin; u_int seq, qop_state = 0; /* * gss header verifier, * usually checked in gss_validate */ seq = htonl(gr.gr_win); bufin.value = (unsigned char *)&seq; bufin.length = sizeof(seq); maj_stat = gss_verify_mic(&min_stat, gd->gd_ctx, &bufin, &gd->gd_verf, &qop_state); if (maj_stat != GSS_S_COMPLETE || qop_state != gd->gd_qop) { rpc_gss_log_status("gss_verify_mic", gd->gd_mech, maj_stat, min_stat); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { rpc_gss_destroy_context(auth, TRUE); } _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); options_ret->major_status = maj_stat; options_ret->minor_status = min_stat; break; } options_ret->major_status = GSS_S_COMPLETE; options_ret->minor_status = 0; options_ret->rpcsec_version = gd->gd_cred.gc_version; options_ret->gss_context = gd->gd_ctx; gd->gd_cred.gc_proc = RPCSEC_GSS_DATA; gd->gd_seq = 1; gd->gd_win = gr.gr_win; break; } } gss_release_name(&min_stat, &name); xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); out: /* End context negotiation loop. */ if (gd->gd_cred.gc_proc != RPCSEC_GSS_DATA) { rpc_createerr.cf_stat = RPC_AUTHERROR; _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, EPERM); if (gd->gd_ctx) { gss_delete_sec_context(&min_stat, &gd->gd_ctx, GSS_C_NO_BUFFER); } mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_START; wakeup(gd); mtx_unlock(&gd->gd_lock); return (FALSE); } mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_ESTABLISHED; wakeup(gd); mtx_unlock(&gd->gd_lock); return (TRUE); } static bool_t rpc_gss_refresh(AUTH *auth, void *msg) { struct rpc_msg *reply = (struct rpc_msg *) msg; rpc_gss_options_ret_t options; struct rpc_gss_data *gd; gd = AUTH_PRIVATE(auth); /* * If the context is in DESTROYING state, then just return, since * there is no point in refreshing the credentials. 
*/ mtx_lock(&gd->gd_lock); if (gd->gd_state == RPCSEC_GSS_DESTROYING) { mtx_unlock(&gd->gd_lock); return (FALSE); } mtx_unlock(&gd->gd_lock); /* * If the error was RPCSEC_GSS_CREDPROBLEM of * RPCSEC_GSS_CTXPROBLEM we start again from scratch. All * other errors are fatal. */ if (reply->rm_reply.rp_stat == MSG_DENIED && reply->rm_reply.rp_rjct.rj_stat == AUTH_ERROR && (reply->rm_reply.rp_rjct.rj_why == RPCSEC_GSS_CREDPROBLEM || reply->rm_reply.rp_rjct.rj_why == RPCSEC_GSS_CTXPROBLEM)) { rpc_gss_destroy_context(auth, FALSE); memset(&options, 0, sizeof(options)); return (rpc_gss_init(auth, &options)); } return (FALSE); } static void rpc_gss_destroy_context(AUTH *auth, bool_t send_destroy) { struct rpc_gss_data *gd; struct rpc_pending_request *pr; OM_uint32 min_stat; struct rpc_callextra ext; rpc_gss_log_debug("in rpc_gss_destroy_context()"); gd = AUTH_PRIVATE(auth); mtx_lock(&gd->gd_lock); /* * If the context isn't in ESTABISHED state, someone else is * destroying/refreshing - we wait till they are done. */ if (gd->gd_state != RPCSEC_GSS_ESTABLISHED) { while (gd->gd_state != RPCSEC_GSS_START && gd->gd_state != RPCSEC_GSS_ESTABLISHED) msleep(gd, &gd->gd_lock, 0, "gssstate", 0); mtx_unlock(&gd->gd_lock); return; } gd->gd_state = RPCSEC_GSS_DESTROYING; mtx_unlock(&gd->gd_lock); if (send_destroy) { gd->gd_cred.gc_proc = RPCSEC_GSS_DESTROY; bzero(&ext, sizeof(ext)); ext.rc_auth = auth; CLNT_CALL_EXT(gd->gd_clnt, &ext, NULLPROC, (xdrproc_t)xdr_void, NULL, (xdrproc_t)xdr_void, NULL, AUTH_TIMEOUT); } while ((pr = LIST_FIRST(&gd->gd_reqs)) != NULL) { LIST_REMOVE(pr, pr_link); mem_free(pr, sizeof(*pr)); } /* * Free the context token. Remember that this was * allocated by XDR, not GSS-API. */ xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_cred.gc_handle); gd->gd_cred.gc_handle.length = 0; if (gd->gd_ctx != GSS_C_NO_CONTEXT) gss_delete_sec_context(&min_stat, &gd->gd_ctx, NULL); mtx_lock(&gd->gd_lock); gd->gd_state = RPCSEC_GSS_START; wakeup(gd); mtx_unlock(&gd->gd_lock); } static void rpc_gss_destroy(AUTH *auth) { struct rpc_gss_data *gd; rpc_gss_log_debug("in rpc_gss_destroy()"); gd = AUTH_PRIVATE(auth); if (!refcount_release(&gd->gd_refs)) return; rpc_gss_destroy_context(auth, TRUE); CLNT_RELEASE(gd->gd_clnt); crfree(gd->gd_ucred); free(gd->gd_principal, M_RPC); if (gd->gd_clntprincipal != NULL) free(gd->gd_clntprincipal, M_RPC); if (gd->gd_verf.value) xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &gd->gd_verf); mtx_destroy(&gd->gd_lock); mem_free(gd, sizeof(*gd)); mem_free(auth, sizeof(*auth)); } int rpc_gss_max_data_length(AUTH *auth, int max_tp_unit_len) { struct rpc_gss_data *gd; int want_conf; OM_uint32 max; OM_uint32 maj_stat, min_stat; int result; gd = AUTH_PRIVATE(auth); switch (gd->gd_cred.gc_svc) { case rpc_gss_svc_none: return (max_tp_unit_len); break; case rpc_gss_svc_default: case rpc_gss_svc_integrity: want_conf = FALSE; break; case rpc_gss_svc_privacy: want_conf = TRUE; break; default: return (0); } maj_stat = gss_wrap_size_limit(&min_stat, gd->gd_ctx, want_conf, gd->gd_qop, max_tp_unit_len, &max); if (maj_stat == GSS_S_COMPLETE) { result = (int) max; if (result < 0) result = 0; return (result); } else { rpc_gss_log_status("gss_wrap_size_limit", gd->gd_mech, maj_stat, min_stat); return (0); } } diff --git a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c index 36ab8c5bcf87..c9a09438d907 100644 --- a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c +++ b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c @@ -1,1579 +1,1579 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * 
Copyright (c) 1990 The Regents of the University of California. * * Copyright (c) 2008 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* svc_rpcsec_gss.c Copyright (c) 2000 The Regents of the University of Michigan. All rights reserved. Copyright (c) 2000 Dug Song . All rights reserved, all wrongs reversed. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
$Id: svc_auth_gss.c,v 1.27 2002/01/15 15:43:00 andros Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rpcsec_gss_int.h" static bool_t svc_rpc_gss_wrap(SVCAUTH *, struct mbuf **); static bool_t svc_rpc_gss_unwrap(SVCAUTH *, struct mbuf **); static void svc_rpc_gss_release(SVCAUTH *); static enum auth_stat svc_rpc_gss(struct svc_req *, struct rpc_msg *); static int rpc_gss_svc_getcred(struct svc_req *, struct ucred **, int *); -static struct svc_auth_ops svc_auth_gss_ops = { - svc_rpc_gss_wrap, - svc_rpc_gss_unwrap, - svc_rpc_gss_release, +static const struct svc_auth_ops svc_auth_gss_ops = { + .svc_ah_wrap = svc_rpc_gss_wrap, + .svc_ah_unwrap = svc_rpc_gss_unwrap, + .svc_ah_release = svc_rpc_gss_release, }; struct sx svc_rpc_gss_lock; struct svc_rpc_gss_callback { SLIST_ENTRY(svc_rpc_gss_callback) cb_link; rpc_gss_callback_t cb_callback; }; static SLIST_HEAD(svc_rpc_gss_callback_list, svc_rpc_gss_callback) svc_rpc_gss_callbacks = SLIST_HEAD_INITIALIZER(svc_rpc_gss_callbacks); struct svc_rpc_gss_svc_name { SLIST_ENTRY(svc_rpc_gss_svc_name) sn_link; char *sn_principal; gss_OID sn_mech; u_int sn_req_time; gss_cred_id_t sn_cred; u_int sn_program; u_int sn_version; }; static SLIST_HEAD(svc_rpc_gss_svc_name_list, svc_rpc_gss_svc_name) svc_rpc_gss_svc_names = SLIST_HEAD_INITIALIZER(svc_rpc_gss_svc_names); enum svc_rpc_gss_client_state { CLIENT_NEW, /* still authenticating */ CLIENT_ESTABLISHED, /* context established */ CLIENT_STALE /* garbage to collect */ }; #define SVC_RPC_GSS_SEQWINDOW 128 struct svc_rpc_gss_clientid { unsigned long ci_hostid; uint32_t ci_boottime; uint32_t ci_id; }; struct svc_rpc_gss_client { TAILQ_ENTRY(svc_rpc_gss_client) cl_link; TAILQ_ENTRY(svc_rpc_gss_client) cl_alllink; volatile u_int cl_refs; struct sx cl_lock; struct svc_rpc_gss_clientid cl_id; time_t cl_expiration; /* when to gc */ enum svc_rpc_gss_client_state cl_state; /* client state */ bool_t cl_locked; /* fixed service+qop */ gss_ctx_id_t cl_ctx; /* context id */ gss_cred_id_t cl_creds; /* delegated creds */ gss_name_t cl_cname; /* client name */ struct svc_rpc_gss_svc_name *cl_sname; /* server name used */ rpc_gss_rawcred_t cl_rawcred; /* raw credentials */ rpc_gss_ucred_t cl_ucred; /* unix-style credentials */ struct ucred *cl_cred; /* kernel-style credentials */ int cl_rpcflavor; /* RPC pseudo sec flavor */ bool_t cl_done_callback; /* TRUE after call */ void *cl_cookie; /* user cookie from callback */ gid_t cl_gid_storage[NGROUPS]; gss_OID cl_mech; /* mechanism */ gss_qop_t cl_qop; /* quality of protection */ uint32_t cl_seqlast; /* sequence window origin */ uint32_t cl_seqmask[SVC_RPC_GSS_SEQWINDOW/32]; /* bitmask of seqnums */ }; TAILQ_HEAD(svc_rpc_gss_client_list, svc_rpc_gss_client); /* * This structure holds enough information to unwrap arguments or wrap * results for a given request. We use the rq_clntcred area for this * (which is a per-request buffer). 
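 *
 * A hedged sketch of how a service routine typically reads these
 * fields back out via rpc_gss_getcred() (defined below); "req" is a
 * hypothetical struct svc_req pointer for an RPCSEC_GSS request:
 *
 *	rpc_gss_rawcred_t *rcred;
 *	rpc_gss_ucred_t *ucred;
 *	void *cookie;
 *	if (rpc_gss_getcred(req, &rcred, &ucred, &cookie) &&
 *	    rcred->service == rpc_gss_svc_privacy) {
 *		// arguments were both integrity- and privacy-protected
 *	}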
*/ struct svc_rpc_gss_cookedcred { struct svc_rpc_gss_client *cc_client; rpc_gss_service_t cc_service; uint32_t cc_seq; }; #define CLIENT_HASH_SIZE 256 #define CLIENT_MAX 1024 u_int svc_rpc_gss_client_max = CLIENT_MAX; u_int svc_rpc_gss_client_hash_size = CLIENT_HASH_SIZE; SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "RPC"); SYSCTL_NODE(_kern_rpc, OID_AUTO, gss, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GSS"); SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_max, CTLFLAG_RW, &svc_rpc_gss_client_max, 0, "Max number of rpc-gss clients"); SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_hash, CTLFLAG_RDTUN, &svc_rpc_gss_client_hash_size, 0, "Size of rpc-gss client hash table"); static u_int svc_rpc_gss_lifetime_max = 0; SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, lifetime_max, CTLFLAG_RW, &svc_rpc_gss_lifetime_max, 0, "Maximum lifetime (seconds) of rpc-gss clients"); static u_int svc_rpc_gss_client_count; SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_count, CTLFLAG_RD, &svc_rpc_gss_client_count, 0, "Number of rpc-gss clients"); struct svc_rpc_gss_client_list *svc_rpc_gss_client_hash; struct svc_rpc_gss_client_list svc_rpc_gss_clients; static uint32_t svc_rpc_gss_next_clientid = 1; static void svc_rpc_gss_init(void *arg) { int i; svc_rpc_gss_client_hash = mem_alloc(sizeof(struct svc_rpc_gss_client_list) * svc_rpc_gss_client_hash_size); for (i = 0; i < svc_rpc_gss_client_hash_size; i++) TAILQ_INIT(&svc_rpc_gss_client_hash[i]); TAILQ_INIT(&svc_rpc_gss_clients); svc_auth_reg(RPCSEC_GSS, svc_rpc_gss, rpc_gss_svc_getcred); sx_init(&svc_rpc_gss_lock, "gsslock"); } SYSINIT(svc_rpc_gss_init, SI_SUB_KMEM, SI_ORDER_ANY, svc_rpc_gss_init, NULL); bool_t rpc_gss_set_callback(rpc_gss_callback_t *cb) { struct svc_rpc_gss_callback *scb; scb = mem_alloc(sizeof(struct svc_rpc_gss_callback)); if (!scb) { _rpc_gss_set_error(RPC_GSS_ER_SYSTEMERROR, ENOMEM); return (FALSE); } scb->cb_callback = *cb; sx_xlock(&svc_rpc_gss_lock); SLIST_INSERT_HEAD(&svc_rpc_gss_callbacks, scb, cb_link); sx_xunlock(&svc_rpc_gss_lock); return (TRUE); } void rpc_gss_clear_callback(rpc_gss_callback_t *cb) { struct svc_rpc_gss_callback *scb; sx_xlock(&svc_rpc_gss_lock); SLIST_FOREACH(scb, &svc_rpc_gss_callbacks, cb_link) { if (scb->cb_callback.program == cb->program && scb->cb_callback.version == cb->version && scb->cb_callback.callback == cb->callback) { SLIST_REMOVE(&svc_rpc_gss_callbacks, scb, svc_rpc_gss_callback, cb_link); sx_xunlock(&svc_rpc_gss_lock); mem_free(scb, sizeof(*scb)); return; } } sx_xunlock(&svc_rpc_gss_lock); } static bool_t rpc_gss_acquire_svc_cred(struct svc_rpc_gss_svc_name *sname) { OM_uint32 maj_stat, min_stat; gss_buffer_desc namebuf; gss_name_t name; gss_OID_set_desc oid_set; oid_set.count = 1; oid_set.elements = sname->sn_mech; namebuf.value = (void *) sname->sn_principal; namebuf.length = strlen(sname->sn_principal); maj_stat = gss_import_name(&min_stat, &namebuf, GSS_C_NT_HOSTBASED_SERVICE, &name); if (maj_stat != GSS_S_COMPLETE) return (FALSE); if (sname->sn_cred != GSS_C_NO_CREDENTIAL) gss_release_cred(&min_stat, &sname->sn_cred); maj_stat = gss_acquire_cred(&min_stat, name, sname->sn_req_time, &oid_set, GSS_C_ACCEPT, &sname->sn_cred, NULL, NULL); if (maj_stat != GSS_S_COMPLETE) { gss_release_name(&min_stat, &name); return (FALSE); } gss_release_name(&min_stat, &name); return (TRUE); } bool_t rpc_gss_set_svc_name(const char *principal, const char *mechanism, u_int req_time, u_int program, u_int version) { struct svc_rpc_gss_svc_name *sname; gss_OID mech_oid; if (!rpc_gss_mech_to_oid(mechanism, &mech_oid)) return 
(FALSE); sname = mem_alloc(sizeof(*sname)); if (!sname) return (FALSE); sname->sn_principal = strdup(principal, M_RPC); sname->sn_mech = mech_oid; sname->sn_req_time = req_time; sname->sn_cred = GSS_C_NO_CREDENTIAL; sname->sn_program = program; sname->sn_version = version; if (!rpc_gss_acquire_svc_cred(sname)) { free(sname->sn_principal, M_RPC); mem_free(sname, sizeof(*sname)); return (FALSE); } sx_xlock(&svc_rpc_gss_lock); SLIST_INSERT_HEAD(&svc_rpc_gss_svc_names, sname, sn_link); sx_xunlock(&svc_rpc_gss_lock); return (TRUE); } void rpc_gss_clear_svc_name(u_int program, u_int version) { OM_uint32 min_stat; struct svc_rpc_gss_svc_name *sname; sx_xlock(&svc_rpc_gss_lock); SLIST_FOREACH(sname, &svc_rpc_gss_svc_names, sn_link) { if (sname->sn_program == program && sname->sn_version == version) { SLIST_REMOVE(&svc_rpc_gss_svc_names, sname, svc_rpc_gss_svc_name, sn_link); sx_xunlock(&svc_rpc_gss_lock); gss_release_cred(&min_stat, &sname->sn_cred); free(sname->sn_principal, M_RPC); mem_free(sname, sizeof(*sname)); return; } } sx_xunlock(&svc_rpc_gss_lock); } bool_t rpc_gss_get_principal_name(rpc_gss_principal_t *principal, const char *mech, const char *name, const char *node, const char *domain) { OM_uint32 maj_stat, min_stat; gss_OID mech_oid; size_t namelen; gss_buffer_desc buf; gss_name_t gss_name, gss_mech_name; rpc_gss_principal_t result; if (!rpc_gss_mech_to_oid(mech, &mech_oid)) return (FALSE); /* * Construct a gss_buffer containing the full name formatted * as "name/node@domain" where node and domain are optional. */ namelen = strlen(name) + 1; if (node) { namelen += strlen(node) + 1; } if (domain) { namelen += strlen(domain) + 1; } buf.value = mem_alloc(namelen); buf.length = namelen; strcpy((char *) buf.value, name); if (node) { strcat((char *) buf.value, "/"); strcat((char *) buf.value, node); } if (domain) { strcat((char *) buf.value, "@"); strcat((char *) buf.value, domain); } /* * Convert that to a gss_name_t and then convert that to a * mechanism name in the selected mechanism. */ maj_stat = gss_import_name(&min_stat, &buf, GSS_C_NT_USER_NAME, &gss_name); mem_free(buf.value, buf.length); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_import_name", mech_oid, maj_stat, min_stat); return (FALSE); } maj_stat = gss_canonicalize_name(&min_stat, gss_name, mech_oid, &gss_mech_name); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_canonicalize_name", mech_oid, maj_stat, min_stat); gss_release_name(&min_stat, &gss_name); return (FALSE); } gss_release_name(&min_stat, &gss_name); /* * Export the mechanism name and use that to construct the * rpc_gss_principal_t result. 
*/ maj_stat = gss_export_name(&min_stat, gss_mech_name, &buf); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_export_name", mech_oid, maj_stat, min_stat); gss_release_name(&min_stat, &gss_mech_name); return (FALSE); } gss_release_name(&min_stat, &gss_mech_name); result = mem_alloc(sizeof(int) + buf.length); if (!result) { gss_release_buffer(&min_stat, &buf); return (FALSE); } result->len = buf.length; memcpy(result->name, buf.value, buf.length); gss_release_buffer(&min_stat, &buf); *principal = result; return (TRUE); } bool_t rpc_gss_getcred(struct svc_req *req, rpc_gss_rawcred_t **rcred, rpc_gss_ucred_t **ucred, void **cookie) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; if (req->rq_cred.oa_flavor != RPCSEC_GSS) return (FALSE); cc = req->rq_clntcred; client = cc->cc_client; if (rcred) *rcred = &client->cl_rawcred; if (ucred) *ucred = &client->cl_ucred; if (cookie) *cookie = client->cl_cookie; return (TRUE); } /* * This simpler interface is used by svc_getcred to copy the cred data * into a kernel cred structure. */ static int rpc_gss_svc_getcred(struct svc_req *req, struct ucred **crp, int *flavorp) { struct ucred *cr; struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_ucred_t *uc; if (req->rq_cred.oa_flavor != RPCSEC_GSS) return (FALSE); cc = req->rq_clntcred; client = cc->cc_client; if (flavorp) *flavorp = client->cl_rpcflavor; if (client->cl_cred) { *crp = crhold(client->cl_cred); return (TRUE); } uc = &client->cl_ucred; cr = client->cl_cred = crget(); cr->cr_uid = cr->cr_ruid = cr->cr_svuid = uc->uid; cr->cr_rgid = cr->cr_svgid = uc->gid; crsetgroups(cr, uc->gidlen, uc->gidlist); cr->cr_prison = &prison0; prison_hold(cr->cr_prison); *crp = crhold(cr); return (TRUE); } int rpc_gss_svc_max_data_length(struct svc_req *req, int max_tp_unit_len) { struct svc_rpc_gss_cookedcred *cc = req->rq_clntcred; struct svc_rpc_gss_client *client = cc->cc_client; int want_conf; OM_uint32 max; OM_uint32 maj_stat, min_stat; int result; switch (client->cl_rawcred.service) { case rpc_gss_svc_none: return (max_tp_unit_len); break; case rpc_gss_svc_default: case rpc_gss_svc_integrity: want_conf = FALSE; break; case rpc_gss_svc_privacy: want_conf = TRUE; break; default: return (0); } maj_stat = gss_wrap_size_limit(&min_stat, client->cl_ctx, want_conf, client->cl_qop, max_tp_unit_len, &max); if (maj_stat == GSS_S_COMPLETE) { result = (int) max; if (result < 0) result = 0; return (result); } else { rpc_gss_log_status("gss_wrap_size_limit", client->cl_mech, maj_stat, min_stat); return (0); } } static struct svc_rpc_gss_client * svc_rpc_gss_find_client(struct svc_rpc_gss_clientid *id) { struct svc_rpc_gss_client *client; struct svc_rpc_gss_client_list *list; struct timeval boottime; unsigned long hostid; rpc_gss_log_debug("in svc_rpc_gss_find_client(%d)", id->ci_id); getcredhostid(curthread->td_ucred, &hostid); getboottime(&boottime); if (id->ci_hostid != hostid || id->ci_boottime != boottime.tv_sec) return (NULL); list = &svc_rpc_gss_client_hash[id->ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_FOREACH(client, list, cl_link) { if (client->cl_id.ci_id == id->ci_id) { /* * Move this client to the front of the LRU * list. 
*/ TAILQ_REMOVE(&svc_rpc_gss_clients, client, cl_alllink); TAILQ_INSERT_HEAD(&svc_rpc_gss_clients, client, cl_alllink); refcount_acquire(&client->cl_refs); break; } } sx_xunlock(&svc_rpc_gss_lock); return (client); } static struct svc_rpc_gss_client * svc_rpc_gss_create_client(void) { struct svc_rpc_gss_client *client; struct svc_rpc_gss_client_list *list; struct timeval boottime; unsigned long hostid; rpc_gss_log_debug("in svc_rpc_gss_create_client()"); client = mem_alloc(sizeof(struct svc_rpc_gss_client)); memset(client, 0, sizeof(struct svc_rpc_gss_client)); /* * Set the initial value of cl_refs to two. One for the caller * and the other to hold onto the client structure until it expires. */ refcount_init(&client->cl_refs, 2); sx_init(&client->cl_lock, "GSS-client"); getcredhostid(curthread->td_ucred, &hostid); client->cl_id.ci_hostid = hostid; getboottime(&boottime); client->cl_id.ci_boottime = boottime.tv_sec; client->cl_id.ci_id = svc_rpc_gss_next_clientid++; /* * Start the client off with a short expiration time. We will * try to get a saner value from the client creds later. */ client->cl_state = CLIENT_NEW; client->cl_locked = FALSE; client->cl_expiration = time_uptime + 5*60; list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_INSERT_HEAD(list, client, cl_link); TAILQ_INSERT_HEAD(&svc_rpc_gss_clients, client, cl_alllink); svc_rpc_gss_client_count++; sx_xunlock(&svc_rpc_gss_lock); return (client); } static void svc_rpc_gss_destroy_client(struct svc_rpc_gss_client *client) { OM_uint32 min_stat; rpc_gss_log_debug("in svc_rpc_gss_destroy_client()"); if (client->cl_ctx) gss_delete_sec_context(&min_stat, &client->cl_ctx, GSS_C_NO_BUFFER); if (client->cl_cname) gss_release_name(&min_stat, &client->cl_cname); if (client->cl_rawcred.client_principal) mem_free(client->cl_rawcred.client_principal, sizeof(*client->cl_rawcred.client_principal) + client->cl_rawcred.client_principal->len); if (client->cl_cred) crfree(client->cl_cred); sx_destroy(&client->cl_lock); mem_free(client, sizeof(*client)); } /* * Drop a reference to a client and free it if that was the last reference. */ static void svc_rpc_gss_release_client(struct svc_rpc_gss_client *client) { if (!refcount_release(&client->cl_refs)) return; svc_rpc_gss_destroy_client(client); } /* * Remove a client from our global lists. * Must be called with svc_rpc_gss_lock held. */ static void svc_rpc_gss_forget_client_locked(struct svc_rpc_gss_client *client) { struct svc_rpc_gss_client_list *list; sx_assert(&svc_rpc_gss_lock, SX_XLOCKED); list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; TAILQ_REMOVE(list, client, cl_link); TAILQ_REMOVE(&svc_rpc_gss_clients, client, cl_alllink); svc_rpc_gss_client_count--; } /* * Remove a client from our global lists and free it if we can. */ static void svc_rpc_gss_forget_client(struct svc_rpc_gss_client *client) { struct svc_rpc_gss_client_list *list; struct svc_rpc_gss_client *tclient; list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size]; sx_xlock(&svc_rpc_gss_lock); TAILQ_FOREACH(tclient, list, cl_link) { /* * Make sure this client has not already been removed * from the lists by svc_rpc_gss_forget_client() or * svc_rpc_gss_forget_client_locked(). 
*/ if (client == tclient) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); svc_rpc_gss_release_client(client); return; } } sx_xunlock(&svc_rpc_gss_lock); } static void svc_rpc_gss_timeout_clients(void) { struct svc_rpc_gss_client *client; time_t now = time_uptime; rpc_gss_log_debug("in svc_rpc_gss_timeout_clients()"); /* * First enforce the max client limit. We keep * svc_rpc_gss_clients in LRU order. */ sx_xlock(&svc_rpc_gss_lock); client = TAILQ_LAST(&svc_rpc_gss_clients, svc_rpc_gss_client_list); while (svc_rpc_gss_client_count > svc_rpc_gss_client_max && client != NULL) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); svc_rpc_gss_release_client(client); sx_xlock(&svc_rpc_gss_lock); client = TAILQ_LAST(&svc_rpc_gss_clients, svc_rpc_gss_client_list); } again: TAILQ_FOREACH(client, &svc_rpc_gss_clients, cl_alllink) { if (client->cl_state == CLIENT_STALE || now > client->cl_expiration) { svc_rpc_gss_forget_client_locked(client); sx_xunlock(&svc_rpc_gss_lock); rpc_gss_log_debug("expiring client %p", client); svc_rpc_gss_release_client(client); sx_xlock(&svc_rpc_gss_lock); goto again; } } sx_xunlock(&svc_rpc_gss_lock); } #ifdef DEBUG /* * OID<->string routines. These are uuuuugly. */ static OM_uint32 gss_oid_to_str(OM_uint32 *minor_status, gss_OID oid, gss_buffer_t oid_str) { char numstr[128]; unsigned long number; int numshift; size_t string_length; size_t i; unsigned char *cp; char *bp; /* Decoded according to krb5/gssapi_krb5.c */ /* First determine the size of the string */ string_length = 0; number = 0; numshift = 0; cp = (unsigned char *) oid->elements; number = (unsigned long) cp[0]; sprintf(numstr, "%ld ", number/40); string_length += strlen(numstr); sprintf(numstr, "%ld ", number%40); string_length += strlen(numstr); for (i=1; i < oid->length; i++) { if ( (size_t) (numshift+7) < (sizeof(unsigned long)*8)) { number = (number << 7) | (cp[i] & 0x7f); numshift += 7; } else { *minor_status = 0; return(GSS_S_FAILURE); } if ((cp[i] & 0x80) == 0) { sprintf(numstr, "%ld ", number); string_length += strlen(numstr); number = 0; numshift = 0; } } /* * If we get here, we've calculated the length of "n n n ... n ". Add 4 * here for "{ " and "}\0".
*/ string_length += 4; if ((bp = malloc(string_length, M_GSSAPI, M_WAITOK | M_ZERO))) { strcpy(bp, "{ "); number = (unsigned long) cp[0]; sprintf(numstr, "%ld ", number/40); strcat(bp, numstr); sprintf(numstr, "%ld ", number%40); strcat(bp, numstr); number = 0; cp = (unsigned char *) oid->elements; for (i=1; i < oid->length; i++) { number = (number << 7) | (cp[i] & 0x7f); if ((cp[i] & 0x80) == 0) { sprintf(numstr, "%ld ", number); strcat(bp, numstr); number = 0; } } strcat(bp, "}"); oid_str->length = strlen(bp)+1; oid_str->value = (void *) bp; *minor_status = 0; return(GSS_S_COMPLETE); } *minor_status = 0; return(GSS_S_FAILURE); } #endif static void svc_rpc_gss_build_ucred(struct svc_rpc_gss_client *client, const gss_name_t name) { OM_uint32 maj_stat, min_stat; rpc_gss_ucred_t *uc = &client->cl_ucred; int numgroups; uc->uid = 65534; uc->gid = 65534; uc->gidlist = client->cl_gid_storage; numgroups = NGROUPS; maj_stat = gss_pname_to_unix_cred(&min_stat, name, client->cl_mech, &uc->uid, &uc->gid, &numgroups, &uc->gidlist[0]); if (GSS_ERROR(maj_stat)) uc->gidlen = 0; else uc->gidlen = numgroups; } static void svc_rpc_gss_set_flavor(struct svc_rpc_gss_client *client) { static gss_OID_desc krb5_mech_oid = {9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" }; /* * Attempt to translate mech type and service into a * 'pseudo flavor'. Hardwire in krb5 support for now. */ if (kgss_oid_equal(client->cl_mech, &krb5_mech_oid)) { switch (client->cl_rawcred.service) { case rpc_gss_svc_default: case rpc_gss_svc_none: client->cl_rpcflavor = RPCSEC_GSS_KRB5; break; case rpc_gss_svc_integrity: client->cl_rpcflavor = RPCSEC_GSS_KRB5I; break; case rpc_gss_svc_privacy: client->cl_rpcflavor = RPCSEC_GSS_KRB5P; break; } } else { client->cl_rpcflavor = RPCSEC_GSS; } } static bool_t svc_rpc_gss_accept_sec_context(struct svc_rpc_gss_client *client, struct svc_req *rqst, struct rpc_gss_init_res *gr, struct rpc_gss_cred *gc) { gss_buffer_desc recv_tok; gss_OID mech; OM_uint32 maj_stat = 0, min_stat = 0, ret_flags; OM_uint32 cred_lifetime; struct svc_rpc_gss_svc_name *sname; rpc_gss_log_debug("in svc_rpc_gss_accept_context()"); /* Deserialize arguments. */ memset(&recv_tok, 0, sizeof(recv_tok)); if (!svc_getargs(rqst, (xdrproc_t) xdr_gss_buffer_desc, (caddr_t) &recv_tok)) { client->cl_state = CLIENT_STALE; return (FALSE); } /* * First time round, try all the server names we have until * one matches. Afterwards, stick with that one. */ sx_xlock(&svc_rpc_gss_lock); if (!client->cl_sname) { SLIST_FOREACH(sname, &svc_rpc_gss_svc_names, sn_link) { if (sname->sn_program == rqst->rq_prog && sname->sn_version == rqst->rq_vers) { retry: gr->gr_major = gss_accept_sec_context( &gr->gr_minor, &client->cl_ctx, sname->sn_cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &client->cl_cname, &mech, &gr->gr_token, &ret_flags, &cred_lifetime, &client->cl_creds); if (gr->gr_major == GSS_S_CREDENTIALS_EXPIRED) { /* * Either our creds really did * expire or gssd was * restarted.
*/ if (rpc_gss_acquire_svc_cred(sname)) goto retry; } client->cl_sname = sname; break; } } if (!sname) { xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_tok); sx_xunlock(&svc_rpc_gss_lock); return (FALSE); } } else { gr->gr_major = gss_accept_sec_context( &gr->gr_minor, &client->cl_ctx, client->cl_sname->sn_cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &client->cl_cname, &mech, &gr->gr_token, &ret_flags, &cred_lifetime, NULL); } sx_xunlock(&svc_rpc_gss_lock); xdr_free((xdrproc_t) xdr_gss_buffer_desc, (char *) &recv_tok); /* * If we get an error from gss_accept_sec_context, send the * reply anyway so that the client gets a chance to see what * is wrong. */ if (gr->gr_major != GSS_S_COMPLETE && gr->gr_major != GSS_S_CONTINUE_NEEDED) { rpc_gss_log_status("accept_sec_context", client->cl_mech, gr->gr_major, gr->gr_minor); client->cl_state = CLIENT_STALE; return (TRUE); } gr->gr_handle.value = &client->cl_id; gr->gr_handle.length = sizeof(client->cl_id); gr->gr_win = SVC_RPC_GSS_SEQWINDOW; /* Save client info. */ client->cl_mech = mech; client->cl_qop = GSS_C_QOP_DEFAULT; client->cl_done_callback = FALSE; if (gr->gr_major == GSS_S_COMPLETE) { gss_buffer_desc export_name; /* * Change client expiration time to be near when the * client creds expire (or 24 hours if we can't figure * that out). */ if (cred_lifetime == GSS_C_INDEFINITE) cred_lifetime = 24*60*60; /* * Cap cred_lifetime if sysctl kern.rpc.gss.lifetime_max is set. */ if (svc_rpc_gss_lifetime_max > 0 && cred_lifetime > svc_rpc_gss_lifetime_max) cred_lifetime = svc_rpc_gss_lifetime_max; client->cl_expiration = time_uptime + cred_lifetime; /* * Fill in cred details in the rawcred structure. */ client->cl_rawcred.version = RPCSEC_GSS_VERSION; rpc_gss_oid_to_mech(mech, &client->cl_rawcred.mechanism); maj_stat = gss_export_name(&min_stat, client->cl_cname, &export_name); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_export_name", client->cl_mech, maj_stat, min_stat); return (FALSE); } client->cl_rawcred.client_principal = mem_alloc(sizeof(*client->cl_rawcred.client_principal) + export_name.length); client->cl_rawcred.client_principal->len = export_name.length; memcpy(client->cl_rawcred.client_principal->name, export_name.value, export_name.length); gss_release_buffer(&min_stat, &export_name); client->cl_rawcred.svc_principal = client->cl_sname->sn_principal; client->cl_rawcred.service = gc->gc_svc; /* * Use gss_pname_to_uid to map to unix creds. For * kerberos5, this uses krb5_aname_to_localname. */ svc_rpc_gss_build_ucred(client, client->cl_cname); svc_rpc_gss_set_flavor(client); gss_release_name(&min_stat, &client->cl_cname); #ifdef DEBUG { gss_buffer_desc mechname; gss_oid_to_str(&min_stat, mech, &mechname); rpc_gss_log_debug("accepted context for %s with " "<mech %.*s, qop %d, svc %d>", client->cl_rawcred.client_principal->name, mechname.length, (char *)mechname.value, client->cl_qop, client->cl_rawcred.service); gss_release_buffer(&min_stat, &mechname); } #endif /* DEBUG */ } return (TRUE); } static bool_t svc_rpc_gss_validate(struct svc_rpc_gss_client *client, struct rpc_msg *msg, gss_qop_t *qop, rpc_gss_proc_t gcproc) { struct opaque_auth *oa; gss_buffer_desc rpcbuf, checksum; OM_uint32 maj_stat, min_stat; gss_qop_t qop_state; int32_t rpchdr[128 / sizeof(int32_t)]; int32_t *buf; rpc_gss_log_debug("in svc_rpc_gss_validate()"); memset(rpchdr, 0, sizeof(rpchdr)); /* Reconstruct RPC header for signing (from xdr_callmsg).
*/ buf = rpchdr; IXDR_PUT_LONG(buf, msg->rm_xid); IXDR_PUT_ENUM(buf, msg->rm_direction); IXDR_PUT_LONG(buf, msg->rm_call.cb_rpcvers); IXDR_PUT_LONG(buf, msg->rm_call.cb_prog); IXDR_PUT_LONG(buf, msg->rm_call.cb_vers); IXDR_PUT_LONG(buf, msg->rm_call.cb_proc); oa = &msg->rm_call.cb_cred; IXDR_PUT_ENUM(buf, oa->oa_flavor); IXDR_PUT_LONG(buf, oa->oa_length); if (oa->oa_length) { memcpy((caddr_t)buf, oa->oa_base, oa->oa_length); buf += RNDUP(oa->oa_length) / sizeof(int32_t); } rpcbuf.value = rpchdr; rpcbuf.length = (u_char *)buf - (u_char *)rpchdr; checksum.value = msg->rm_call.cb_verf.oa_base; checksum.length = msg->rm_call.cb_verf.oa_length; maj_stat = gss_verify_mic(&min_stat, client->cl_ctx, &rpcbuf, &checksum, &qop_state); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_verify_mic", client->cl_mech, maj_stat, min_stat); /* * A bug in some versions of the Linux client generates a * Destroy operation with a bogus encrypted checksum. Deleting * the credential handle for that case causes the mount to fail. * Since the checksum is bogus (gss_verify_mic() failed), it * doesn't make sense to destroy the handle and not doing so * fixes the Linux mount. */ if (gcproc != RPCSEC_GSS_DESTROY) client->cl_state = CLIENT_STALE; return (FALSE); } *qop = qop_state; return (TRUE); } static bool_t svc_rpc_gss_nextverf(struct svc_rpc_gss_client *client, struct svc_req *rqst, u_int seq) { gss_buffer_desc signbuf; gss_buffer_desc mic; OM_uint32 maj_stat, min_stat; uint32_t nseq; rpc_gss_log_debug("in svc_rpc_gss_nextverf()"); nseq = htonl(seq); signbuf.value = &nseq; signbuf.length = sizeof(nseq); maj_stat = gss_get_mic(&min_stat, client->cl_ctx, client->cl_qop, &signbuf, &mic); if (maj_stat != GSS_S_COMPLETE) { rpc_gss_log_status("gss_get_mic", client->cl_mech, maj_stat, min_stat); client->cl_state = CLIENT_STALE; return (FALSE); } KASSERT(mic.length <= MAX_AUTH_BYTES, ("MIC too large for RPCSEC_GSS")); rqst->rq_verf.oa_flavor = RPCSEC_GSS; rqst->rq_verf.oa_length = mic.length; bcopy(mic.value, rqst->rq_verf.oa_base, mic.length); gss_release_buffer(&min_stat, &mic); return (TRUE); } static bool_t svc_rpc_gss_callback(struct svc_rpc_gss_client *client, struct svc_req *rqst) { struct svc_rpc_gss_callback *scb; rpc_gss_lock_t lock; void *cookie; bool_t cb_res; bool_t result; /* * See if we have a callback for this guy. */ result = TRUE; SLIST_FOREACH(scb, &svc_rpc_gss_callbacks, cb_link) { if (scb->cb_callback.program == rqst->rq_prog && scb->cb_callback.version == rqst->rq_vers) { /* * This one matches. Call the callback and see * if it wants to veto or something. */ lock.locked = FALSE; lock.raw_cred = &client->cl_rawcred; cb_res = scb->cb_callback.callback(rqst, client->cl_creds, client->cl_ctx, &lock, &cookie); if (!cb_res) { client->cl_state = CLIENT_STALE; result = FALSE; break; } /* * The callback accepted the connection - it * is responsible for freeing client->cl_creds * now. */ client->cl_creds = GSS_C_NO_CREDENTIAL; client->cl_locked = lock.locked; client->cl_cookie = cookie; return (TRUE); } } /* * Either no callback exists for this program/version or one * of the callbacks rejected the connection. We just need to * clean up the delegated client creds, if any. 
*/ if (client->cl_creds) { OM_uint32 min_ver; gss_release_cred(&min_ver, &client->cl_creds); } return (result); } static bool_t svc_rpc_gss_check_replay(struct svc_rpc_gss_client *client, uint32_t seq) { u_int32_t offset; int word, bit; bool_t result; sx_xlock(&client->cl_lock); if (seq <= client->cl_seqlast) { /* * The request sequence number is less than * the largest we have seen so far. If it is * outside the window or if we have seen a * request with this sequence before, silently * discard it. */ offset = client->cl_seqlast - seq; if (offset >= SVC_RPC_GSS_SEQWINDOW) { result = FALSE; goto out; } word = offset / 32; bit = offset % 32; if (client->cl_seqmask[word] & (1 << bit)) { result = FALSE; goto out; } } result = TRUE; out: sx_xunlock(&client->cl_lock); return (result); } static void svc_rpc_gss_update_seq(struct svc_rpc_gss_client *client, uint32_t seq) { int offset, i, word, bit; uint32_t carry, newcarry; sx_xlock(&client->cl_lock); if (seq > client->cl_seqlast) { /* * This request has a sequence number greater * than any we have seen so far. Advance the * seq window and set bit zero of the window * (which corresponds to the new sequence * number) */ offset = seq - client->cl_seqlast; while (offset > 32) { for (i = (SVC_RPC_GSS_SEQWINDOW / 32) - 1; i > 0; i--) { client->cl_seqmask[i] = client->cl_seqmask[i-1]; } client->cl_seqmask[0] = 0; offset -= 32; } carry = 0; for (i = 0; i < SVC_RPC_GSS_SEQWINDOW / 32; i++) { newcarry = client->cl_seqmask[i] >> (32 - offset); client->cl_seqmask[i] = (client->cl_seqmask[i] << offset) | carry; carry = newcarry; } client->cl_seqmask[0] |= 1; client->cl_seqlast = seq; } else { offset = client->cl_seqlast - seq; word = offset / 32; bit = offset % 32; client->cl_seqmask[word] |= (1 << bit); } sx_xunlock(&client->cl_lock); } enum auth_stat svc_rpc_gss(struct svc_req *rqst, struct rpc_msg *msg) { OM_uint32 min_stat; XDR xdrs; struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; struct rpc_gss_cred gc; struct rpc_gss_init_res gr; gss_qop_t qop; int call_stat; enum auth_stat result; rpc_gss_log_debug("in svc_rpc_gss()"); /* Garbage collect old clients. */ svc_rpc_gss_timeout_clients(); /* Initialize reply. */ rqst->rq_verf = _null_auth; /* Deserialize client credentials. */ if (rqst->rq_cred.oa_length <= 0) return (AUTH_BADCRED); memset(&gc, 0, sizeof(gc)); xdrmem_create(&xdrs, rqst->rq_cred.oa_base, rqst->rq_cred.oa_length, XDR_DECODE); if (!xdr_rpc_gss_cred(&xdrs, &gc)) { XDR_DESTROY(&xdrs); return (AUTH_BADCRED); } XDR_DESTROY(&xdrs); client = NULL; /* Check version. */ if (gc.gc_version != RPCSEC_GSS_VERSION) { result = AUTH_BADCRED; goto out; } /* Check the proc and find the client (or create it) */ if (gc.gc_proc == RPCSEC_GSS_INIT) { if (gc.gc_handle.length != 0) { result = AUTH_BADCRED; goto out; } client = svc_rpc_gss_create_client(); } else { struct svc_rpc_gss_clientid *p; if (gc.gc_handle.length != sizeof(*p)) { result = AUTH_BADCRED; goto out; } p = gc.gc_handle.value; client = svc_rpc_gss_find_client(p); if (!client) { /* * Can't find the client - we may have * destroyed it - tell the other side to * re-authenticate. */ result = RPCSEC_GSS_CREDPROBLEM; goto out; } } cc = rqst->rq_clntcred; cc->cc_client = client; cc->cc_service = gc.gc_svc; cc->cc_seq = gc.gc_seq; /* * The service and sequence number must be ignored for * RPCSEC_GSS_INIT and RPCSEC_GSS_CONTINUE_INIT. */ if (gc.gc_proc != RPCSEC_GSS_INIT && gc.gc_proc != RPCSEC_GSS_CONTINUE_INIT) { /* * Check for sequence number overflow. 
*/ if (gc.gc_seq >= MAXSEQ) { result = RPCSEC_GSS_CTXPROBLEM; goto out; } /* * Check for valid service. */ if (gc.gc_svc != rpc_gss_svc_none && gc.gc_svc != rpc_gss_svc_integrity && gc.gc_svc != rpc_gss_svc_privacy) { result = AUTH_BADCRED; goto out; } } /* Handle RPCSEC_GSS control procedure. */ switch (gc.gc_proc) { case RPCSEC_GSS_INIT: case RPCSEC_GSS_CONTINUE_INIT: if (rqst->rq_proc != NULLPROC) { result = AUTH_REJECTEDCRED; break; } memset(&gr, 0, sizeof(gr)); if (!svc_rpc_gss_accept_sec_context(client, rqst, &gr, &gc)) { result = AUTH_REJECTEDCRED; break; } if (gr.gr_major == GSS_S_COMPLETE) { /* * We borrow the space for the call verf to * pack our reply verf. */ rqst->rq_verf = msg->rm_call.cb_verf; if (!svc_rpc_gss_nextverf(client, rqst, gr.gr_win)) { result = AUTH_REJECTEDCRED; break; } } else { rqst->rq_verf = _null_auth; } call_stat = svc_sendreply(rqst, (xdrproc_t) xdr_rpc_gss_init_res, (caddr_t) &gr); gss_release_buffer(&min_stat, &gr.gr_token); if (!call_stat) { result = AUTH_FAILED; break; } if (gr.gr_major == GSS_S_COMPLETE) client->cl_state = CLIENT_ESTABLISHED; result = RPCSEC_GSS_NODISPATCH; break; case RPCSEC_GSS_DATA: case RPCSEC_GSS_DESTROY: if (!svc_rpc_gss_check_replay(client, gc.gc_seq)) { result = RPCSEC_GSS_NODISPATCH; break; } if (!svc_rpc_gss_validate(client, msg, &qop, gc.gc_proc)) { result = RPCSEC_GSS_CREDPROBLEM; break; } /* * We borrow the space for the call verf to pack our * reply verf. */ rqst->rq_verf = msg->rm_call.cb_verf; if (!svc_rpc_gss_nextverf(client, rqst, gc.gc_seq)) { result = RPCSEC_GSS_CTXPROBLEM; break; } svc_rpc_gss_update_seq(client, gc.gc_seq); /* * Change the SVCAUTH ops on the request to point at * our own code so that we can unwrap the arguments * and wrap the result. The caller will re-set this on * every request to point to a set of null wrap/unwrap * methods. Acquire an extra reference to the client * which will be released by svc_rpc_gss_release() * after the request has finished processing. */ refcount_acquire(&client->cl_refs); rqst->rq_auth.svc_ah_ops = &svc_auth_gss_ops; rqst->rq_auth.svc_ah_private = cc; if (gc.gc_proc == RPCSEC_GSS_DATA) { /* * We might be ready to do a callback to the server to * see if it wants to accept/reject the connection. */ sx_xlock(&client->cl_lock); if (!client->cl_done_callback) { client->cl_done_callback = TRUE; client->cl_qop = qop; client->cl_rawcred.qop = _rpc_gss_num_to_qop( client->cl_rawcred.mechanism, qop); if (!svc_rpc_gss_callback(client, rqst)) { result = AUTH_REJECTEDCRED; sx_xunlock(&client->cl_lock); break; } } sx_xunlock(&client->cl_lock); /* * If the server has locked this client to a * particular service+qop pair, enforce that * restriction now. */ if (client->cl_locked) { if (client->cl_rawcred.service != gc.gc_svc) { result = AUTH_FAILED; break; } else if (client->cl_qop != qop) { result = AUTH_BADVERF; break; } } /* * If the qop changed, look up the new qop * name for rawcred. */ if (client->cl_qop != qop) { client->cl_qop = qop; client->cl_rawcred.qop = _rpc_gss_num_to_qop( client->cl_rawcred.mechanism, qop); } /* * Make sure we use the right service value * for unwrap/wrap. 
*/ if (client->cl_rawcred.service != gc.gc_svc) { client->cl_rawcred.service = gc.gc_svc; svc_rpc_gss_set_flavor(client); } result = AUTH_OK; } else { if (rqst->rq_proc != NULLPROC) { result = AUTH_REJECTEDCRED; break; } call_stat = svc_sendreply(rqst, (xdrproc_t) xdr_void, (caddr_t) NULL); if (!call_stat) { result = AUTH_FAILED; break; } svc_rpc_gss_forget_client(client); result = RPCSEC_GSS_NODISPATCH; break; } break; default: result = AUTH_BADCRED; break; } out: if (client) svc_rpc_gss_release_client(client); xdr_free((xdrproc_t) xdr_rpc_gss_cred, (char *) &gc); return (result); } static bool_t svc_rpc_gss_wrap(SVCAUTH *auth, struct mbuf **mp) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_wrap()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; if (client->cl_state != CLIENT_ESTABLISHED || cc->cc_service == rpc_gss_svc_none || *mp == NULL) { return (TRUE); } return (xdr_rpc_gss_wrap_data(mp, client->cl_ctx, client->cl_qop, cc->cc_service, cc->cc_seq)); } static bool_t svc_rpc_gss_unwrap(SVCAUTH *auth, struct mbuf **mp) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_unwrap()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; if (client->cl_state != CLIENT_ESTABLISHED || cc->cc_service == rpc_gss_svc_none) { return (TRUE); } return (xdr_rpc_gss_unwrap_data(mp, client->cl_ctx, client->cl_qop, cc->cc_service, cc->cc_seq)); } static void svc_rpc_gss_release(SVCAUTH *auth) { struct svc_rpc_gss_cookedcred *cc; struct svc_rpc_gss_client *client; rpc_gss_log_debug("in svc_rpc_gss_release()"); cc = (struct svc_rpc_gss_cookedcred *) auth->svc_ah_private; client = cc->cc_client; svc_rpc_gss_release_client(client); } diff --git a/sys/rpc/rpcsec_tls/auth_tls.c b/sys/rpc/rpcsec_tls/auth_tls.c index bd23784f5ebf..9afde553283e 100644 --- a/sys/rpc/rpcsec_tls/auth_tls.c +++ b/sys/rpc/rpcsec_tls/auth_tls.c @@ -1,169 +1,169 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * auth_none.c * Creates a client authentication handle for passing "null" * credentials and verifiers to remote systems. * * Copyright (C) 1984, Sun Microsystems, Inc. */ /* * Modified from auth_none.c to expect a reply verifier of "STARTTLS" * for the RPC-over-TLS STARTTLS command. */ #include #include #include #include #include #include #include #include #include #include #include #define MAX_MARSHAL_SIZE 20 /* * Authenticator operations routines */ static bool_t authtls_marshal (AUTH *, uint32_t, XDR *, struct mbuf *); static void authtls_verf (AUTH *); static bool_t authtls_validate (AUTH *, uint32_t, struct opaque_auth *, struct mbuf **); static bool_t authtls_refresh (AUTH *, void *); static void authtls_destroy (AUTH *); -static struct auth_ops authtls_ops = { +static const struct auth_ops authtls_ops = { .ah_nextverf = authtls_verf, .ah_marshal = authtls_marshal, .ah_validate = authtls_validate, .ah_refresh = authtls_refresh, .ah_destroy = authtls_destroy, }; struct authtls_private { AUTH no_client; char mclient[MAX_MARSHAL_SIZE]; u_int mcnt; }; static struct authtls_private authtls_private; static struct opaque_auth _tls_null_auth; static void authtls_init(void *dummy) { struct authtls_private *ap = &authtls_private; XDR xdrs; _tls_null_auth.oa_flavor = AUTH_TLS; _tls_null_auth.oa_base = NULL; _tls_null_auth.oa_length = 0; ap->no_client.ah_cred = _tls_null_auth; ap->no_client.ah_verf = _null_auth; ap->no_client.ah_ops = &authtls_ops; xdrmem_create(&xdrs, ap->mclient, MAX_MARSHAL_SIZE, XDR_ENCODE); xdr_opaque_auth(&xdrs, &ap->no_client.ah_cred); xdr_opaque_auth(&xdrs, &ap->no_client.ah_verf); ap->mcnt = XDR_GETPOS(&xdrs); XDR_DESTROY(&xdrs); } SYSINIT(authtls_init, SI_SUB_KMEM, SI_ORDER_ANY, authtls_init, NULL); AUTH * authtls_create(void) { struct authtls_private *ap = &authtls_private; return (&ap->no_client); } /*ARGSUSED*/ static bool_t authtls_marshal(AUTH *client, uint32_t xid, XDR *xdrs, struct mbuf *args) { struct authtls_private *ap = &authtls_private; KASSERT(xdrs != NULL, ("authtls_marshal: xdrs is null")); if (!XDR_PUTBYTES(xdrs, ap->mclient, ap->mcnt)) return (FALSE); xdrmbuf_append(xdrs, args); return (TRUE); } /* All these unused parameters are required to keep ANSI-C from grumbling */ /*ARGSUSED*/ static void authtls_verf(AUTH *client) { } /*ARGSUSED*/ static bool_t authtls_validate(AUTH *client, uint32_t xid, struct opaque_auth *opaque, struct mbuf **mrepp) { size_t strsiz; strsiz = strlen(RPCTLS_START_STRING); /* The verifier must be the string RPCTLS_START_STRING. 
*/ if (opaque != NULL && (opaque->oa_length != strsiz || memcmp(opaque->oa_base, RPCTLS_START_STRING, strsiz) != 0)) return (FALSE); return (TRUE); } /*ARGSUSED*/ static bool_t authtls_refresh(AUTH *client, void *dummy) { return (FALSE); } /*ARGSUSED*/ static void authtls_destroy(AUTH *client) { } diff --git a/sys/rpc/svc.h b/sys/rpc/svc.h index 8a94d7058972..d92fa6953891 100644 --- a/sys/rpc/svc.h +++ b/sys/rpc/svc.h @@ -1,924 +1,924 @@ /* $NetBSD: svc.h,v 1.17 2000/06/02 22:57:56 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * from: @(#)svc.h 1.35 88/12/17 SMI * from: @(#)svc.h 1.27 94/04/25 SMI * $FreeBSD$ */ /* * svc.h, Server-side remote procedure call interface. * * Copyright (C) 1986-1993 by Sun Microsystems, Inc. */ #ifndef _RPC_SVC_H #define _RPC_SVC_H #include #ifdef _KERNEL #include #include #include #include #include #include #endif /* * This interface must manage two items concerning remote procedure calling: * * 1) An arbitrary number of transport connections upon which rpc requests * are received. The two most notable transports are TCP and UDP; they are * created and registered by routines in svc_tcp.c and svc_udp.c, respectively; * they in turn call xprt_register and xprt_unregister. * * 2) An arbitrary number of locally registered services. Services are * described by the following four data: program number, version number, * "service dispatch" function, a transport handle, and a boolean that * indicates whether or not the exported program should be registered with a * local binder service; if true the program's number and version and the * port number from the transport handle are registered with the binder. * These data are registered with the rpc svc system via svc_register. * * A service's dispatch function is called whenever an rpc request comes in * on a transport. The request's program and version numbers must match * those of the registered service. 
The dispatch function is passed two * parameters, struct svc_req * and SVCXPRT *, defined below. */ /* * Service control requests */ #define SVCGET_VERSQUIET 1 #define SVCSET_VERSQUIET 2 #define SVCGET_CONNMAXREC 3 #define SVCSET_CONNMAXREC 4 /* * Operations for rpc_control(). */ #define RPC_SVC_CONNMAXREC_SET 0 /* set max rec size, enable nonblock */ #define RPC_SVC_CONNMAXREC_GET 1 enum xprt_stat { XPRT_DIED, XPRT_MOREREQS, XPRT_IDLE }; struct __rpc_svcxprt; struct mbuf; struct xp_ops { #ifdef _KERNEL /* receive incoming requests */ bool_t (*xp_recv)(struct __rpc_svcxprt *, struct rpc_msg *, struct sockaddr **, struct mbuf **); /* get transport status */ enum xprt_stat (*xp_stat)(struct __rpc_svcxprt *); /* get transport acknowledge sequence */ bool_t (*xp_ack)(struct __rpc_svcxprt *, uint32_t *); /* send reply */ bool_t (*xp_reply)(struct __rpc_svcxprt *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *); /* destroy this struct */ void (*xp_destroy)(struct __rpc_svcxprt *); /* catch-all function */ bool_t (*xp_control)(struct __rpc_svcxprt *, const u_int, void *); #else /* receive incoming requests */ bool_t (*xp_recv)(struct __rpc_svcxprt *, struct rpc_msg *); /* get transport status */ enum xprt_stat (*xp_stat)(struct __rpc_svcxprt *); /* get arguments */ bool_t (*xp_getargs)(struct __rpc_svcxprt *, xdrproc_t, void *); /* send reply */ bool_t (*xp_reply)(struct __rpc_svcxprt *, struct rpc_msg *); /* free mem allocated for args */ bool_t (*xp_freeargs)(struct __rpc_svcxprt *, xdrproc_t, void *); /* destroy this struct */ void (*xp_destroy)(struct __rpc_svcxprt *); #endif }; #ifndef _KERNEL struct xp_ops2 { /* catch-all function */ bool_t (*xp_control)(struct __rpc_svcxprt *, const u_int, void *); }; #endif #ifdef _KERNEL struct __rpc_svcpool; struct __rpc_svcgroup; struct __rpc_svcthread; #endif /* * Server side transport handle. In the kernel, transports have a * reference count which tracks the number of currently assigned * worker threads plus one for the service pool's reference. * For NFSv4.1 sessions, a reference is also held for a backchannel. * xp_p2 - Points to the CLIENT structure for the RPC server end * (the client end for callbacks). * Points to the private structure (cl_private) for the * CLIENT structure for the RPC client end (the server * end for callbacks). 
*/ typedef struct __rpc_svcxprt { #ifdef _KERNEL volatile u_int xp_refs; struct sx xp_lock; struct __rpc_svcpool *xp_pool; /* owning pool (see below) */ struct __rpc_svcgroup *xp_group; /* owning group (see below) */ TAILQ_ENTRY(__rpc_svcxprt) xp_link; TAILQ_ENTRY(__rpc_svcxprt) xp_alink; bool_t xp_registered; /* xprt_register has been called */ bool_t xp_active; /* xprt_active has been called */ struct __rpc_svcthread *xp_thread; /* assigned service thread */ struct socket* xp_socket; const struct xp_ops *xp_ops; char *xp_netid; /* network token */ struct sockaddr_storage xp_ltaddr; /* local transport address */ struct sockaddr_storage xp_rtaddr; /* remote transport address */ void *xp_p1; /* private: for use by svc ops */ void *xp_p2; /* private: for use by svc ops */ void *xp_p3; /* private: for use by svc lib */ int xp_type; /* transport type */ int xp_idletimeout; /* idle time before closing */ time_t xp_lastactive; /* time of last RPC */ u_int64_t xp_sockref; /* set by nfsv4 to identify socket */ int xp_upcallset; /* socket upcall is set up */ uint32_t xp_snd_cnt; /* # of bytes to send to socket */ uint32_t xp_snt_cnt; /* # of bytes sent to socket */ bool_t xp_dontrcv; /* Do not receive on the socket */ uint32_t xp_tls; /* RPC-over-TLS on socket */ uint64_t xp_sslsec; /* Userland SSL * */ uint64_t xp_sslusec; uint64_t xp_sslrefno; int xp_ngrps; /* Cred. from TLS cert. */ uid_t xp_uid; gid_t *xp_gidp; #else int xp_fd; u_short xp_port; /* associated port number */ const struct xp_ops *xp_ops; int xp_addrlen; /* length of remote address */ struct sockaddr_in xp_raddr; /* remote addr. (backward ABI compat) */ /* XXX - fvdl stick this here for ABI backward compat reasons */ const struct xp_ops2 *xp_ops2; char *xp_tp; /* transport provider device name */ char *xp_netid; /* network token */ struct netbuf xp_ltaddr; /* local transport address */ struct netbuf xp_rtaddr; /* remote transport address */ struct opaque_auth xp_verf; /* raw response verifier */ void *xp_p1; /* private: for use by svc ops */ void *xp_p2; /* private: for use by svc ops */ void *xp_p3; /* private: for use by svc lib */ int xp_type; /* transport type */ #endif } SVCXPRT; /* * Interface to server-side authentication flavors. */ typedef struct __rpc_svcauth { - struct svc_auth_ops { + const struct svc_auth_ops { #ifdef _KERNEL int (*svc_ah_wrap)(struct __rpc_svcauth *, struct mbuf **); int (*svc_ah_unwrap)(struct __rpc_svcauth *, struct mbuf **); void (*svc_ah_release)(struct __rpc_svcauth *); #else int (*svc_ah_wrap)(struct __rpc_svcauth *, XDR *, xdrproc_t, caddr_t); int (*svc_ah_unwrap)(struct __rpc_svcauth *, XDR *, xdrproc_t, caddr_t); #endif } *svc_ah_ops; void *svc_ah_private; } SVCAUTH; /* * Server transport extensions (accessed via xp_p3). */ typedef struct __rpc_svcxprt_ext { int xp_flags; /* versquiet */ SVCAUTH xp_auth; /* interface to auth methods */ } SVCXPRT_EXT; #ifdef _KERNEL /* * The services list * Each entry represents a set of procedures (an rpc program). * The dispatch routine takes request structs and runs the * appropriate procedure. */ struct svc_callout { TAILQ_ENTRY(svc_callout) sc_link; rpcprog_t sc_prog; rpcvers_t sc_vers; char *sc_netid; void (*sc_dispatch)(struct svc_req *, SVCXPRT *); }; TAILQ_HEAD(svc_callout_list, svc_callout); /* * The services connection loss list * The dispatch routine takes request structs and runs the * appropriate procedure. 
*/ struct svc_loss_callout { TAILQ_ENTRY(svc_loss_callout) slc_link; void (*slc_dispatch)(SVCXPRT *); }; TAILQ_HEAD(svc_loss_callout_list, svc_loss_callout); /* * Service request */ struct svc_req { STAILQ_ENTRY(svc_req) rq_link; /* list of requests for a thread */ struct __rpc_svcthread *rq_thread; /* thread which is to execute this */ uint32_t rq_xid; /* RPC transaction ID */ uint32_t rq_prog; /* service program number */ uint32_t rq_vers; /* service protocol version */ uint32_t rq_proc; /* the desired procedure */ size_t rq_size; /* space used by request */ struct mbuf *rq_args; /* XDR-encoded procedure arguments */ struct opaque_auth rq_cred; /* raw creds from the wire */ struct opaque_auth rq_verf; /* verifier for the reply */ void *rq_clntcred; /* read only cooked cred */ SVCAUTH rq_auth; /* interface to auth methods */ SVCXPRT *rq_xprt; /* associated transport */ struct sockaddr *rq_addr; /* reply address or NULL if connected */ void *rq_p1; /* application workspace */ int rq_p2; /* application workspace */ uint64_t rq_p3; /* application workspace */ uint32_t rq_reply_seq; /* reply socket sequence # */ char rq_credarea[3*MAX_AUTH_BYTES]; }; STAILQ_HEAD(svc_reqlist, svc_req); #define svc_getrpccaller(rq) \ ((rq)->rq_addr ? (rq)->rq_addr : \ (struct sockaddr *) &(rq)->rq_xprt->xp_rtaddr) /* * This structure is used to manage a thread which is executing * requests from a service pool. A service thread is in one of three * states: * * SVCTHREAD_SLEEPING waiting for a request to process * SVCTHREAD_ACTIVE processing a request * SVCTHREAD_EXITING exiting after finishing current request * * Threads which have no work to process sleep on the pool's sp_active * list. When a transport becomes active, it is assigned a service * thread to read and execute pending RPCs. */ typedef struct __rpc_svcthread { struct mtx_padalign st_lock; /* protects st_reqs field */ struct __rpc_svcpool *st_pool; SVCXPRT *st_xprt; /* transport we are processing */ struct svc_reqlist st_reqs; /* RPC requests to execute */ struct cv st_cond; /* sleeping for work */ LIST_ENTRY(__rpc_svcthread) st_ilink; /* idle threads list */ LIST_ENTRY(__rpc_svcthread) st_alink; /* application thread list */ int st_p2; /* application workspace */ uint64_t st_p3; /* application workspace */ } SVCTHREAD; LIST_HEAD(svcthread_list, __rpc_svcthread); /* * A thread group contain all information needed to assign subset of * transports to subset of threads. On systems with many CPUs and many * threads that allows to reduce lock congestion and improve performance. * Hundreds of threads on dozens of CPUs sharing the single pool lock do * not scale well otherwise. 
*/ TAILQ_HEAD(svcxprt_list, __rpc_svcxprt); enum svcpool_state { SVCPOOL_INIT, /* svc_run not called yet */ SVCPOOL_ACTIVE, /* normal running state */ SVCPOOL_THREADWANTED, /* new service thread requested */ SVCPOOL_THREADSTARTING, /* new service thread started */ SVCPOOL_CLOSING /* svc_exit called */ }; typedef struct __rpc_svcgroup { struct mtx_padalign sg_lock; /* protect the thread/req lists */ struct __rpc_svcpool *sg_pool; enum svcpool_state sg_state; /* current pool state */ struct svcxprt_list sg_xlist; /* all transports in the group */ struct svcxprt_list sg_active; /* transports needing service */ struct svcthread_list sg_idlethreads; /* idle service threads */ int sg_minthreads; /* minimum service thread count */ int sg_maxthreads; /* maximum service thread count */ int sg_threadcount; /* current service thread count */ time_t sg_lastcreatetime; /* when we last started a thread */ time_t sg_lastidlecheck; /* when we last checked idle transports */ } SVCGROUP; /* * In the kernel, we can't use global variables to store lists of * transports etc. since otherwise we could not have two unrelated RPC * services running, each on its own thread. We solve this by * importing a tiny part of a Solaris kernel concept, SVCPOOL. * * A service pool contains a set of transports and service callbacks * for a set of related RPC services. The pool handle should be passed * when creating new transports etc. Future work may include extending * this to support something similar to the Solaris multi-threaded RPC * server. */ typedef SVCTHREAD *pool_assign_fn(SVCTHREAD *, struct svc_req *); typedef void pool_done_fn(SVCTHREAD *, struct svc_req *); #define SVC_MAXGROUPS 16 typedef struct __rpc_svcpool { struct mtx_padalign sp_lock; /* protect the transport lists */ const char *sp_name; /* pool name (e.g. "nfsd", "NLM" */ enum svcpool_state sp_state; /* current pool state */ struct proc *sp_proc; /* process which is in svc_run */ struct svc_callout_list sp_callouts; /* (prog,vers)->dispatch list */ struct svc_loss_callout_list sp_lcallouts; /* loss->dispatch list */ int sp_minthreads; /* minimum service thread count */ int sp_maxthreads; /* maximum service thread count */ /* * Hooks to allow an application to control request to thread * placement. */ pool_assign_fn *sp_assign; pool_done_fn *sp_done; /* * These variables are used to put an upper bound on the * amount of memory used by RPC requests which are queued * waiting for execution. */ unsigned long sp_space_low; unsigned long sp_space_high; unsigned long sp_space_used; unsigned long sp_space_used_highest; bool_t sp_space_throttled; int sp_space_throttle_count; struct replay_cache *sp_rcache; /* optional replay cache */ struct sysctl_ctx_list sp_sysctl; int sp_groupcount; /* Number of groups in the pool. */ int sp_nextgroup; /* Next group to assign port. */ SVCGROUP sp_groups[SVC_MAXGROUPS]; /* Thread/port groups. 
*/ } SVCPOOL; #else /* * Service request */ struct svc_req { uint32_t rq_prog; /* service program number */ uint32_t rq_vers; /* service protocol version */ uint32_t rq_proc; /* the desired procedure */ struct opaque_auth rq_cred; /* raw creds from the wire */ void *rq_clntcred; /* read only cooked cred */ SVCXPRT *rq_xprt; /* associated transport */ }; /* * Approved way of getting address of caller */ #define svc_getrpccaller(x) (&(x)->xp_rtaddr) #endif /* * Operations defined on an SVCXPRT handle * * SVCXPRT *xprt; * struct rpc_msg *msg; * xdrproc_t xargs; * void * argsp; */ #ifdef _KERNEL #define SVC_ACQUIRE(xprt) \ refcount_acquire(&(xprt)->xp_refs) #define SVC_RELEASE(xprt) \ if (refcount_release(&(xprt)->xp_refs)) \ SVC_DESTROY(xprt) #define SVC_RECV(xprt, msg, addr, args) \ (*(xprt)->xp_ops->xp_recv)((xprt), (msg), (addr), (args)) #define SVC_STAT(xprt) \ (*(xprt)->xp_ops->xp_stat)(xprt) #define SVC_ACK(xprt, ack) \ ((xprt)->xp_ops->xp_ack == NULL ? FALSE : \ ((ack) == NULL ? TRUE : (*(xprt)->xp_ops->xp_ack)((xprt), (ack)))) #define SVC_REPLY(xprt, msg, addr, m, seq) \ (*(xprt)->xp_ops->xp_reply) ((xprt), (msg), (addr), (m), (seq)) #define SVC_DESTROY(xprt) \ (*(xprt)->xp_ops->xp_destroy)(xprt) #define SVC_CONTROL(xprt, rq, in) \ (*(xprt)->xp_ops->xp_control)((xprt), (rq), (in)) #else #define SVC_RECV(xprt, msg) \ (*(xprt)->xp_ops->xp_recv)((xprt), (msg)) #define svc_recv(xprt, msg) \ (*(xprt)->xp_ops->xp_recv)((xprt), (msg)) #define SVC_STAT(xprt) \ (*(xprt)->xp_ops->xp_stat)(xprt) #define svc_stat(xprt) \ (*(xprt)->xp_ops->xp_stat)(xprt) #define SVC_GETARGS(xprt, xargs, argsp) \ (*(xprt)->xp_ops->xp_getargs)((xprt), (xargs), (argsp)) #define svc_getargs(xprt, xargs, argsp) \ (*(xprt)->xp_ops->xp_getargs)((xprt), (xargs), (argsp)) #define SVC_REPLY(xprt, msg) \ (*(xprt)->xp_ops->xp_reply) ((xprt), (msg)) #define svc_reply(xprt, msg) \ (*(xprt)->xp_ops->xp_reply) ((xprt), (msg)) #define SVC_FREEARGS(xprt, xargs, argsp) \ (*(xprt)->xp_ops->xp_freeargs)((xprt), (xargs), (argsp)) #define svc_freeargs(xprt, xargs, argsp) \ (*(xprt)->xp_ops->xp_freeargs)((xprt), (xargs), (argsp)) #define SVC_DESTROY(xprt) \ (*(xprt)->xp_ops->xp_destroy)(xprt) #define svc_destroy(xprt) \ (*(xprt)->xp_ops->xp_destroy)(xprt) #define SVC_CONTROL(xprt, rq, in) \ (*(xprt)->xp_ops2->xp_control)((xprt), (rq), (in)) #endif #define SVC_EXT(xprt) \ ((SVCXPRT_EXT *) xprt->xp_p3) #define SVC_AUTH(xprt) \ (SVC_EXT(xprt)->xp_auth) /* * Operations defined on an SVCAUTH handle */ #ifdef _KERNEL #define SVCAUTH_WRAP(auth, mp) \ ((auth)->svc_ah_ops->svc_ah_wrap(auth, mp)) #define SVCAUTH_UNWRAP(auth, mp) \ ((auth)->svc_ah_ops->svc_ah_unwrap(auth, mp)) #define SVCAUTH_RELEASE(auth) \ ((auth)->svc_ah_ops->svc_ah_release(auth)) #else #define SVCAUTH_WRAP(auth, xdrs, xfunc, xwhere) \ ((auth)->svc_ah_ops->svc_ah_wrap(auth, xdrs, xfunc, xwhere)) #define SVCAUTH_UNWRAP(auth, xdrs, xfunc, xwhere) \ ((auth)->svc_ah_ops->svc_ah_unwrap(auth, xdrs, xfunc, xwhere)) #endif /* * Service registration * * svc_reg(xprt, prog, vers, dispatch, nconf) * const SVCXPRT *xprt; * const rpcprog_t prog; * const rpcvers_t vers; * const void (*dispatch)(); * const struct netconfig *nconf; */ __BEGIN_DECLS extern bool_t svc_reg(SVCXPRT *, const rpcprog_t, const rpcvers_t, void (*)(struct svc_req *, SVCXPRT *), const struct netconfig *); __END_DECLS /* * Service un-registration * * svc_unreg(prog, vers) * const rpcprog_t prog; * const rpcvers_t vers; */ __BEGIN_DECLS #ifdef _KERNEL extern void svc_unreg(SVCPOOL *, const rpcprog_t, const rpcvers_t); 
#else extern void svc_unreg(const rpcprog_t, const rpcvers_t); #endif __END_DECLS #ifdef _KERNEL /* * Service connection loss registration * * svc_loss_reg(xprt, dispatch) * const SVCXPRT *xprt; * const void (*dispatch)(); */ __BEGIN_DECLS extern bool_t svc_loss_reg(SVCXPRT *, void (*)(SVCXPRT *)); __END_DECLS /* * Service connection loss un-registration * * svc_loss_unreg(xprt, dispatch) * const SVCXPRT *xprt; * const void (*dispatch)(); */ __BEGIN_DECLS extern void svc_loss_unreg(SVCPOOL *, void (*)(SVCXPRT *)); __END_DECLS #endif /* * Transport registration. * * xprt_register(xprt) * SVCXPRT *xprt; */ __BEGIN_DECLS extern void xprt_register(SVCXPRT *); __END_DECLS /* * Transport un-register * * xprt_unregister(xprt) * SVCXPRT *xprt; */ __BEGIN_DECLS extern void xprt_unregister(SVCXPRT *); extern void __xprt_unregister_unlocked(SVCXPRT *); __END_DECLS #ifdef _KERNEL /* * Called when a transport has pending requests. */ __BEGIN_DECLS extern void xprt_active(SVCXPRT *); extern void xprt_inactive(SVCXPRT *); extern void xprt_inactive_locked(SVCXPRT *); extern void xprt_inactive_self(SVCXPRT *); __END_DECLS #endif /* * When the service routine is called, it must first check to see if it * knows about the procedure; if not, it should call svcerr_noproc * and return. If so, it should deserialize its arguments via * SVC_GETARGS (defined above). If the deserialization does not work, * svcerr_decode should be called followed by a return. Successful * decoding of the arguments should be followed the execution of the * procedure's code and a call to svc_sendreply. * * Also, if the service refuses to execute the procedure due to too- * weak authentication parameters, svcerr_weakauth should be called. * Note: do not confuse access-control failure with weak authentication! * * NB: In pure implementations of rpc, the caller always waits for a reply * msg. This message is sent when svc_sendreply is called. * Therefore pure service implementations should always call * svc_sendreply even if the function logically returns void; use * xdr.h - xdr_void for the xdr routine. HOWEVER, tcp based rpc allows * for the abuse of pure rpc via batched calling or pipelining. In the * case of a batched call, svc_sendreply should NOT be called since * this would send a return message, which is what batching tries to avoid. * It is the service/protocol writer's responsibility to know which calls are * batched and which are not. Warning: responding to batch calls may * deadlock the caller and server processes! 
*/ __BEGIN_DECLS #ifdef _KERNEL extern bool_t svc_sendreply(struct svc_req *, xdrproc_t, void *); extern bool_t svc_sendreply_mbuf(struct svc_req *, struct mbuf *); extern void svcerr_decode(struct svc_req *); extern void svcerr_weakauth(struct svc_req *); extern void svcerr_noproc(struct svc_req *); extern void svcerr_progvers(struct svc_req *, rpcvers_t, rpcvers_t); extern void svcerr_auth(struct svc_req *, enum auth_stat); extern void svcerr_noprog(struct svc_req *); extern void svcerr_systemerr(struct svc_req *); #else extern bool_t svc_sendreply(SVCXPRT *, xdrproc_t, void *); extern void svcerr_decode(SVCXPRT *); extern void svcerr_weakauth(SVCXPRT *); extern void svcerr_noproc(SVCXPRT *); extern void svcerr_progvers(SVCXPRT *, rpcvers_t, rpcvers_t); extern void svcerr_auth(SVCXPRT *, enum auth_stat); extern void svcerr_noprog(SVCXPRT *); extern void svcerr_systemerr(SVCXPRT *); #endif extern int rpc_reg(rpcprog_t, rpcvers_t, rpcproc_t, char *(*)(char *), xdrproc_t, xdrproc_t, char *); __END_DECLS /* * Lowest level dispatching -OR- who owns this process anyway. * Somebody has to wait for incoming requests and then call the correct * service routine. The routine svc_run does infinite waiting; i.e., * svc_run never returns. * Since another (co-existant) package may wish to selectively wait for * incoming calls or other events outside of the rpc architecture, the * routine svc_getreq is provided. It must be passed readfds, the * "in-place" results of a select system call (see select, section 2). */ #ifndef _KERNEL /* * Global keeper of rpc service descriptors in use * dynamic; must be inspected before each call to select */ extern int svc_maxfd; #ifdef FD_SETSIZE extern fd_set svc_fdset; #define svc_fds svc_fdset.fds_bits[0] /* compatibility */ #else extern int svc_fds; #endif /* def FD_SETSIZE */ #endif /* * a small program implemented by the svc_rpc implementation itself; * also see clnt.h for protocol numbers. */ __BEGIN_DECLS extern void rpctest_service(void); __END_DECLS __BEGIN_DECLS extern SVCXPRT *svc_xprt_alloc(void); extern void svc_xprt_free(SVCXPRT *); #ifndef _KERNEL extern void svc_getreq(int); extern void svc_getreqset(fd_set *); extern void svc_getreq_common(int); struct pollfd; extern void svc_getreq_poll(struct pollfd *, int); extern void svc_run(void); extern void svc_exit(void); #else extern void svc_run(SVCPOOL *); extern void svc_exit(SVCPOOL *); extern bool_t svc_getargs(struct svc_req *, xdrproc_t, void *); extern bool_t svc_freeargs(struct svc_req *, xdrproc_t, void *); extern void svc_freereq(struct svc_req *); #endif __END_DECLS /* * Socket to use on svcxxx_create call to get default socket */ #define RPC_ANYSOCK -1 #define RPC_ANYFD RPC_ANYSOCK /* * These are the existing service side transport implementations */ __BEGIN_DECLS #ifdef _KERNEL /* * Create a new service pool. */ extern SVCPOOL* svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base); /* * Destroy a service pool, including all registered transports. */ extern void svcpool_destroy(SVCPOOL *pool); /* * Close a service pool. Similar to svcpool_destroy(), but it does not * free the data structures. As such, the pool can be used again. */ extern void svcpool_close(SVCPOOL *pool); /* * Transport independent svc_create routine. 
*/ extern int svc_create(SVCPOOL *, void (*)(struct svc_req *, SVCXPRT *), const rpcprog_t, const rpcvers_t, const char *); /* * void (*dispatch)(); -- dispatch routine * const rpcprog_t prognum; -- program number * const rpcvers_t versnum; -- version number * const char *nettype; -- network type */ /* * Generic server creation routine. It takes a netconfig structure * instead of a nettype. */ extern SVCXPRT *svc_tp_create(SVCPOOL *, void (*)(struct svc_req *, SVCXPRT *), const rpcprog_t, const rpcvers_t, const char *uaddr, const struct netconfig *); /* * void (*dispatch)(); -- dispatch routine * const rpcprog_t prognum; -- program number * const rpcvers_t versnum; -- version number * const char *uaddr; -- universal address of service * const struct netconfig *nconf; -- netconfig structure */ extern SVCXPRT *svc_dg_create(SVCPOOL *, struct socket *, const size_t, const size_t); /* * struct socket *; -- open connection * const size_t sendsize; -- max send size * const size_t recvsize; -- max recv size */ extern SVCXPRT *svc_vc_create(SVCPOOL *, struct socket *, const size_t, const size_t); /* * struct socket *; -- open connection * const size_t sendsize; -- max send size * const size_t recvsize; -- max recv size */ extern SVCXPRT *svc_vc_create_backchannel(SVCPOOL *); extern void *clnt_bck_create(struct socket *, const rpcprog_t, const rpcvers_t); /* * struct socket *; -- server transport socket * const rpcprog_t prog; -- RPC program number * const rpcvers_t vers; -- RPC program version */ /* * Generic TLI create routine */ extern SVCXPRT *svc_tli_create(SVCPOOL *, struct socket *, const struct netconfig *, const struct t_bind *, const size_t, const size_t); /* * struct socket * so; -- connection end point * const struct netconfig *nconf; -- netconfig structure for network * const struct t_bind *bindaddr; -- local bind address * const size_t sendsz; -- max sendsize * const size_t recvsz; -- max recvsize */ #else /* !_KERNEL */ /* * Transport independent svc_create routine. */ extern int svc_create(void (*)(struct svc_req *, SVCXPRT *), const rpcprog_t, const rpcvers_t, const char *); /* * void (*dispatch)(); -- dispatch routine * const rpcprog_t prognum; -- program number * const rpcvers_t versnum; -- version number * const char *nettype; -- network type */ /* * Generic server creation routine. It takes a netconfig structure * instead of a nettype. */ extern SVCXPRT *svc_tp_create(void (*)(struct svc_req *, SVCXPRT *), const rpcprog_t, const rpcvers_t, const struct netconfig *); /* * void (*dispatch)(); -- dispatch routine * const rpcprog_t prognum; -- program number * const rpcvers_t versnum; -- version number * const struct netconfig *nconf; -- netconfig structure */ /* * Generic TLI create routine */ extern SVCXPRT *svc_tli_create(const int, const struct netconfig *, const struct t_bind *, const u_int, const u_int); /* * const int fd; -- connection end point * const struct netconfig *nconf; -- netconfig structure for network * const struct t_bind *bindaddr; -- local bind address * const u_int sendsz; -- max sendsize * const u_int recvsz; -- max recvsize */ /* * Connectionless and connectionful create routines */ extern SVCXPRT *svc_vc_create(const int, const u_int, const u_int); /* * const int fd; -- open connection end point * const u_int sendsize; -- max send size * const u_int recvsize; -- max recv size */ /* * Added for compatibility to old rpc 4.0. Obsoleted by svc_vc_create(). 
*/ extern SVCXPRT *svcunix_create(int, u_int, u_int, char *); extern SVCXPRT *svc_dg_create(const int, const u_int, const u_int); /* * const int fd; -- open connection * const u_int sendsize; -- max send size * const u_int recvsize; -- max recv size */ /* * the routine takes any *open* connection * descriptor as its first input and is used for open connections. */ extern SVCXPRT *svc_fd_create(const int, const u_int, const u_int); /* * const int fd; -- open connection end point * const u_int sendsize; -- max send size * const u_int recvsize; -- max recv size */ /* * Added for compatibility to old rpc 4.0. Obsoleted by svc_fd_create(). */ extern SVCXPRT *svcunixfd_create(int, u_int, u_int); /* * Memory based rpc (for speed check and testing) */ extern SVCXPRT *svc_raw_create(void); /* * svc_dg_enable_cache() enables the cache on dg transports. */ int svc_dg_enablecache(SVCXPRT *, const u_int); int __rpc_get_local_uid(SVCXPRT *_transp, uid_t *_uid); #endif /* !_KERNEL */ __END_DECLS #ifndef _KERNEL /* for backward compatibility */ #include #endif #endif /* !_RPC_SVC_H */ diff --git a/sys/rpc/svc_auth.c b/sys/rpc/svc_auth.c index 0c5a68688d48..75d1f21d3484 100644 --- a/sys/rpc/svc_auth.c +++ b/sys/rpc/svc_auth.c @@ -1,227 +1,227 @@ /* $NetBSD: svc_auth.c,v 1.12 2000/07/06 03:10:35 christos Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1986-1991 by Sun Microsystems Inc. */ #if defined(LIBC_SCCS) && !defined(lint) #ident "@(#)svc_auth.c 1.16 94/04/24 SMI" static char sccsid[] = "@(#)svc_auth.c 1.26 89/02/07 Copyr 1984 Sun Micro"; #endif #include __FBSDID("$FreeBSD$"); /* * svc_auth.c, Server-side rpc authenticator interface. 
* */ #include #include #include #include #include #include #include #include static enum auth_stat (*_svcauth_rpcsec_gss)(struct svc_req *, struct rpc_msg *) = NULL; static int (*_svcauth_rpcsec_gss_getcred)(struct svc_req *, struct ucred **, int *); -static struct svc_auth_ops svc_auth_null_ops; +static const struct svc_auth_ops svc_auth_null_ops; /* * The call rpc message, msg has been obtained from the wire. The msg contains * the raw form of credentials and verifiers. authenticate returns AUTH_OK * if the msg is successfully authenticated. If AUTH_OK then the routine also * does the following things: * set rqst->rq_xprt->verf to the appropriate response verifier; * sets rqst->rq_client_cred to the "cooked" form of the credentials. * * NB: rqst->rq_cxprt->verf must be pre-alloctaed; * its length is set appropriately. * * The caller still owns and is responsible for msg->u.cmb.cred and * msg->u.cmb.verf. The authentication system retains ownership of * rqst->rq_client_cred, the cooked credentials. * * There is an assumption that any flavour less than AUTH_NULL is * invalid. */ enum auth_stat _authenticate(struct svc_req *rqst, struct rpc_msg *msg) { int cred_flavor; enum auth_stat dummy; rqst->rq_cred = msg->rm_call.cb_cred; rqst->rq_auth.svc_ah_ops = &svc_auth_null_ops; rqst->rq_auth.svc_ah_private = NULL; cred_flavor = rqst->rq_cred.oa_flavor; switch (cred_flavor) { case AUTH_NULL: dummy = _svcauth_null(rqst, msg); return (dummy); case AUTH_SYS: if ((rqst->rq_xprt->xp_tls & RPCTLS_FLAGS_DISABLED) != 0) return (AUTH_REJECTEDCRED); dummy = _svcauth_unix(rqst, msg); return (dummy); case AUTH_SHORT: if ((rqst->rq_xprt->xp_tls & RPCTLS_FLAGS_DISABLED) != 0) return (AUTH_REJECTEDCRED); dummy = _svcauth_short(rqst, msg); return (dummy); case RPCSEC_GSS: if ((rqst->rq_xprt->xp_tls & RPCTLS_FLAGS_DISABLED) != 0) return (AUTH_REJECTEDCRED); if (!_svcauth_rpcsec_gss) return (AUTH_REJECTEDCRED); dummy = _svcauth_rpcsec_gss(rqst, msg); return (dummy); case AUTH_TLS: dummy = _svcauth_rpcsec_tls(rqst, msg); return (dummy); default: break; } return (AUTH_REJECTEDCRED); } /* * A set of null auth methods used by any authentication protocols * that don't need to inspect or modify the message body. */ static bool_t svcauth_null_wrap(SVCAUTH *auth, struct mbuf **mp) { return (TRUE); } static bool_t svcauth_null_unwrap(SVCAUTH *auth, struct mbuf **mp) { return (TRUE); } static void svcauth_null_release(SVCAUTH *auth) { } -static struct svc_auth_ops svc_auth_null_ops = { - svcauth_null_wrap, - svcauth_null_unwrap, - svcauth_null_release, +static const struct svc_auth_ops svc_auth_null_ops = { + .svc_ah_wrap = svcauth_null_wrap, + .svc_ah_unwrap = svcauth_null_unwrap, + .svc_ah_release = svcauth_null_release, }; /*ARGSUSED*/ enum auth_stat _svcauth_null(struct svc_req *rqst, struct rpc_msg *msg) { rqst->rq_verf = _null_auth; return (AUTH_OK); } int svc_auth_reg(int flavor, enum auth_stat (*svcauth)(struct svc_req *, struct rpc_msg *), int (*getcred)(struct svc_req *, struct ucred **, int *)) { if (flavor == RPCSEC_GSS) { _svcauth_rpcsec_gss = svcauth; _svcauth_rpcsec_gss_getcred = getcred; } return (TRUE); } int svc_getcred(struct svc_req *rqst, struct ucred **crp, int *flavorp) { struct ucred *cr = NULL; int flavor; struct xucred *xcr; SVCXPRT *xprt = rqst->rq_xprt; flavor = rqst->rq_cred.oa_flavor; if (flavorp) *flavorp = flavor; /* * If there are credentials acquired via a TLS * certificate for this TCP connection, use those * instead of what is in the RPC header. 
*/ if ((xprt->xp_tls & (RPCTLS_FLAGS_CERTUSER | RPCTLS_FLAGS_DISABLED)) == RPCTLS_FLAGS_CERTUSER && flavor == AUTH_UNIX) { cr = crget(); cr->cr_uid = cr->cr_ruid = cr->cr_svuid = xprt->xp_uid; crsetgroups(cr, xprt->xp_ngrps, xprt->xp_gidp); cr->cr_rgid = cr->cr_svgid = xprt->xp_gidp[0]; cr->cr_prison = &prison0; prison_hold(cr->cr_prison); *crp = cr; return (TRUE); } switch (flavor) { case AUTH_UNIX: xcr = (struct xucred *) rqst->rq_clntcred; cr = crget(); cr->cr_uid = cr->cr_ruid = cr->cr_svuid = xcr->cr_uid; crsetgroups(cr, xcr->cr_ngroups, xcr->cr_groups); cr->cr_rgid = cr->cr_svgid = cr->cr_groups[0]; cr->cr_prison = &prison0; prison_hold(cr->cr_prison); *crp = cr; return (TRUE); case RPCSEC_GSS: if (!_svcauth_rpcsec_gss_getcred) return (FALSE); return (_svcauth_rpcsec_gss_getcred(rqst, crp, flavorp)); default: return (FALSE); } } diff --git a/sys/rpc/svc_dg.c b/sys/rpc/svc_dg.c index 2bdd0700c044..db1928655618 100644 --- a/sys/rpc/svc_dg.c +++ b/sys/rpc/svc_dg.c @@ -1,307 +1,307 @@ /* $NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1986-1991 by Sun Microsystems Inc. */ #if defined(LIBC_SCCS) && !defined(lint) #ident "@(#)svc_dg.c 1.17 94/04/24 SMI" #endif #include __FBSDID("$FreeBSD$"); /* * svc_dg.c, Server side for connectionless RPC. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static enum xprt_stat svc_dg_stat(SVCXPRT *); static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *); static void svc_dg_destroy(SVCXPRT *); static bool_t svc_dg_control(SVCXPRT *, const u_int, void *); static int svc_dg_soupcall(struct socket *so, void *arg, int waitflag); -static struct xp_ops svc_dg_ops = { +static const struct xp_ops svc_dg_ops = { .xp_recv = svc_dg_recv, .xp_stat = svc_dg_stat, .xp_reply = svc_dg_reply, .xp_destroy = svc_dg_destroy, .xp_control = svc_dg_control, }; /* * Usage: * xprt = svc_dg_create(sock, sendsize, recvsize); * Does other connectionless specific initializations. * Once *xprt is initialized, it is registered. * see (svc.h, xprt_register). If recvsize or sendsize are 0 suitable * system defaults are chosen. * The routines returns NULL if a problem occurred. */ static const char svc_dg_str[] = "svc_dg_create: %s"; static const char svc_dg_err1[] = "could not get transport information"; static const char svc_dg_err2[] = "transport does not support data transfer"; static const char __no_mem_str[] = "out of memory"; SVCXPRT * svc_dg_create(SVCPOOL *pool, struct socket *so, size_t sendsize, size_t recvsize) { SVCXPRT *xprt; struct __rpc_sockinfo si; struct sockaddr* sa; int error; if (!__rpc_socket2sockinfo(so, &si)) { printf(svc_dg_str, svc_dg_err1); return (NULL); } /* * Find the receive and the send size */ sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize); recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize); if ((sendsize == 0) || (recvsize == 0)) { printf(svc_dg_str, svc_dg_err2); return (NULL); } xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = NULL; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_dg_ops; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); CURVNET_RESTORE(); if (error) goto freedata; memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); free(sa, M_SONAME); xprt_register(xprt); SOCKBUF_LOCK(&so->so_rcv); soupcall_set(so, SO_RCV, svc_dg_soupcall, xprt); SOCKBUF_UNLOCK(&so->so_rcv); return (xprt); freedata: (void) printf(svc_dg_str, __no_mem_str); svc_xprt_free(xprt); return (NULL); } /*ARGSUSED*/ static enum xprt_stat svc_dg_stat(SVCXPRT *xprt) { if (soreadable(xprt->xp_socket)) return (XPRT_MOREREQS); return (XPRT_IDLE); } static bool_t svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct uio uio; struct sockaddr *raddr; struct mbuf *mreq; XDR xdrs; int error, rcvflag; /* * Serialise access to the socket. */ sx_xlock(&xprt->xp_lock); /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to read a * packet from the socket and process it. If the read fails, * we have drained all pending requests so we call * xprt_inactive(). */ uio.uio_resid = 1000000000; uio.uio_td = curthread; mreq = NULL; rcvflag = MSG_DONTWAIT; error = soreceive(xprt->xp_socket, &raddr, &uio, &mreq, NULL, &rcvflag); if (error == EWOULDBLOCK) { /* * We must re-test for readability after taking the * lock to protect us in the case where a new packet * arrives on the socket after our call to soreceive * fails with EWOULDBLOCK. 
The pool lock protects us * from racing the upcall after our soreadable() call * returns false. */ SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); if (!soreadable(xprt->xp_socket)) xprt_inactive_self(xprt); SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); sx_xunlock(&xprt->xp_lock); return (FALSE); } if (error) { SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); soupcall_clear(xprt->xp_socket, SO_RCV); SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); xprt_inactive_self(xprt); sx_xunlock(&xprt->xp_lock); return (FALSE); } sx_xunlock(&xprt->xp_lock); xdrmbuf_create(&xdrs, mreq, XDR_DECODE); if (! xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = raddr; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } static bool_t svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr *addr, struct mbuf *m, uint32_t *seq) { XDR xdrs; struct mbuf *mrep; bool_t stat = TRUE; int error; mrep = m_gethdr(M_WAITOK, MT_DATA); xdrmbuf_create(&xdrs, mrep, XDR_ENCODE); if (msg->rm_reply.rp_stat == MSG_ACCEPTED && msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { if (!xdr_replymsg(&xdrs, msg)) stat = FALSE; else xdrmbuf_append(&xdrs, m); } else { stat = xdr_replymsg(&xdrs, msg); } if (stat) { m_fixhdr(mrep); error = sosend(xprt->xp_socket, addr, NULL, mrep, NULL, 0, curthread); if (!error) { stat = TRUE; } } else { m_freem(mrep); } XDR_DESTROY(&xdrs); xprt->xp_p2 = NULL; return (stat); } static void svc_dg_destroy(SVCXPRT *xprt) { SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); soupcall_clear(xprt->xp_socket, SO_RCV); SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); sx_destroy(&xprt->xp_lock); if (xprt->xp_socket) (void)soclose(xprt->xp_socket); if (xprt->xp_netid) (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1); svc_xprt_free(xprt); } static bool_t /*ARGSUSED*/ svc_dg_control(xprt, rq, in) SVCXPRT *xprt; const u_int rq; void *in; { return (FALSE); } static int svc_dg_soupcall(struct socket *so, void *arg, int waitflag) { SVCXPRT *xprt = (SVCXPRT *) arg; xprt_active(xprt); return (SU_OK); } diff --git a/sys/rpc/svc_vc.c b/sys/rpc/svc_vc.c index 234feba5c8bd..d81c0b01d84d 100644 --- a/sys/rpc/svc_vc.c +++ b/sys/rpc/svc_vc.c @@ -1,1130 +1,1130 @@ /* $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)svc_tcp.c 2.2 88/08/01 4.0 RPCSRC"; #endif #include __FBSDID("$FreeBSD$"); /* * svc_vc.c, Server side for Connection Oriented based RPC. * * Actually implements two flavors of transporter - * a tcp rendezvouser (a listner and connection establisher) * and a record/tcp stream. */ #include "opt_kern_tls.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *); static void svc_vc_rendezvous_destroy(SVCXPRT *); static bool_t svc_vc_null(void); static void svc_vc_destroy(SVCXPRT *); static enum xprt_stat svc_vc_stat(SVCXPRT *); static bool_t svc_vc_ack(SVCXPRT *, uint32_t *); static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *seq); static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in); static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq, void *in); static void svc_vc_backchannel_destroy(SVCXPRT *); static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *); static bool_t svc_vc_backchannel_recv(SVCXPRT *, struct rpc_msg *, struct sockaddr **, struct mbuf **); static bool_t svc_vc_backchannel_reply(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *); static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in); static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr); static int svc_vc_accept(struct socket *head, struct socket **sop); static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag); static int svc_vc_rendezvous_soupcall(struct socket *, void *, int); -static struct xp_ops svc_vc_rendezvous_ops = { +static const struct xp_ops svc_vc_rendezvous_ops = { .xp_recv = svc_vc_rendezvous_recv, .xp_stat = svc_vc_rendezvous_stat, .xp_reply = (bool_t (*)(SVCXPRT *, struct rpc_msg *, struct sockaddr *, struct mbuf *, uint32_t *))svc_vc_null, .xp_destroy = svc_vc_rendezvous_destroy, .xp_control = svc_vc_rendezvous_control }; -static struct xp_ops svc_vc_ops = { +static const struct xp_ops svc_vc_ops = { .xp_recv = svc_vc_recv, .xp_stat = svc_vc_stat, .xp_ack = svc_vc_ack, .xp_reply = svc_vc_reply, .xp_destroy = svc_vc_destroy, .xp_control = svc_vc_control }; -static struct xp_ops svc_vc_backchannel_ops = { +static const struct xp_ops svc_vc_backchannel_ops = { .xp_recv = svc_vc_backchannel_recv, .xp_stat = svc_vc_backchannel_stat, .xp_reply = svc_vc_backchannel_reply, .xp_destroy = svc_vc_backchannel_destroy, .xp_control = svc_vc_backchannel_control }; /* * Usage: * 
xprt = svc_vc_create(sock, send_buf_size, recv_buf_size); * * Creates, registers, and returns a (rpc) tcp based transporter. * Once *xprt is initialized, it is registered as a transporter * see (svc.h, xprt_register). This routine returns * a NULL if a problem occurred. * * The filedescriptor passed in is expected to refer to a bound, but * not yet connected socket. * * Since streams do buffered io similar to stdio, the caller can specify * how big the send and receive buffers are via the second and third parms; * 0 => use the system default. */ SVCXPRT * svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize, size_t recvsize) { SVCXPRT *xprt; struct sockaddr* sa; int error; SOCK_LOCK(so); if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) { SOCK_UNLOCK(so); CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa); CURVNET_RESTORE(); if (error) return (NULL); xprt = svc_vc_create_conn(pool, so, sa); free(sa, M_SONAME); return (xprt); } SOCK_UNLOCK(so); xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = NULL; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_rendezvous_ops; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); CURVNET_RESTORE(); if (error) { goto cleanup_svc_vc_create; } memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); free(sa, M_SONAME); xprt_register(xprt); solisten(so, -1, curthread); SOLISTEN_LOCK(so); xprt->xp_upcallset = 1; solisten_upcall_set(so, svc_vc_rendezvous_soupcall, xprt); SOLISTEN_UNLOCK(so); return (xprt); cleanup_svc_vc_create: sx_destroy(&xprt->xp_lock); svc_xprt_free(xprt); return (NULL); } /* * Create a new transport for a socket optained via soaccept(). */ SVCXPRT * svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr) { SVCXPRT *xprt; struct cf_conn *cd; struct sockaddr* sa = NULL; struct sockopt opt; int one = 1; int error; bzero(&opt, sizeof(struct sockopt)); opt.sopt_dir = SOPT_SET; opt.sopt_level = SOL_SOCKET; opt.sopt_name = SO_KEEPALIVE; opt.sopt_val = &one; opt.sopt_valsize = sizeof(one); error = sosetopt(so, &opt); if (error) { return (NULL); } if (so->so_proto->pr_protocol == IPPROTO_TCP) { bzero(&opt, sizeof(struct sockopt)); opt.sopt_dir = SOPT_SET; opt.sopt_level = IPPROTO_TCP; opt.sopt_name = TCP_NODELAY; opt.sopt_val = &one; opt.sopt_valsize = sizeof(one); error = sosetopt(so, &opt); if (error) { return (NULL); } } cd = mem_alloc(sizeof(*cd)); cd->strm_stat = XPRT_IDLE; xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = so; xprt->xp_p1 = cd; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_ops; /* * See http://www.connectathon.org/talks96/nfstcp.pdf - client * has a 5 minute timer, server has a 6 minute timer. */ xprt->xp_idletimeout = 6 * 60; memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len); CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa); CURVNET_RESTORE(); if (error) goto cleanup_svc_vc_create; memcpy(&xprt->xp_ltaddr, sa, sa->sa_len); free(sa, M_SONAME); xprt_register(xprt); SOCKBUF_LOCK(&so->so_rcv); xprt->xp_upcallset = 1; soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt); SOCKBUF_UNLOCK(&so->so_rcv); /* * Throw the transport into the active list in case it already * has some data buffered. 
*/ sx_xlock(&xprt->xp_lock); xprt_active(xprt); sx_xunlock(&xprt->xp_lock); return (xprt); cleanup_svc_vc_create: sx_destroy(&xprt->xp_lock); svc_xprt_free(xprt); mem_free(cd, sizeof(*cd)); return (NULL); } /* * Create a new transport for a backchannel on a clnt_vc socket. */ SVCXPRT * svc_vc_create_backchannel(SVCPOOL *pool) { SVCXPRT *xprt = NULL; struct cf_conn *cd = NULL; cd = mem_alloc(sizeof(*cd)); cd->strm_stat = XPRT_IDLE; xprt = svc_xprt_alloc(); sx_init(&xprt->xp_lock, "xprt->xp_lock"); xprt->xp_pool = pool; xprt->xp_socket = NULL; xprt->xp_p1 = cd; xprt->xp_p2 = NULL; xprt->xp_ops = &svc_vc_backchannel_ops; return (xprt); } /* * This does all of the accept except the final call to soaccept. The * caller will call soaccept after dropping its locks (soaccept may * call malloc). */ int svc_vc_accept(struct socket *head, struct socket **sop) { struct socket *so; int error = 0; short nbio; /* XXXGL: shouldn't that be an assertion? */ if (!SOLISTENING(head)) { error = EINVAL; goto done; } #ifdef MAC error = mac_socket_check_accept(curthread->td_ucred, head); if (error != 0) goto done; #endif /* * XXXGL: we want non-blocking semantics. The socket could be a * socket created by kernel as well as socket shared with userland, * so we can't be sure about presense of SS_NBIO. We also shall not * toggle it on the socket, since that may surprise userland. So we * set SS_NBIO only temporarily. */ SOLISTEN_LOCK(head); nbio = head->so_state & SS_NBIO; head->so_state |= SS_NBIO; error = solisten_dequeue(head, &so, 0); head->so_state &= (nbio & ~SS_NBIO); if (error) goto done; so->so_state |= nbio; *sop = so; /* connection has been removed from the listen queue */ KNOTE_UNLOCKED(&head->so_rdsel.si_note, 0); done: return (error); } /*ARGSUSED*/ static bool_t svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct socket *so = NULL; struct sockaddr *sa = NULL; int error; SVCXPRT *new_xprt; /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to accept a * connection from the socket and turn it into a new * transport. If the accept fails, we have drained all pending * connections so we call xprt_inactive(). */ sx_xlock(&xprt->xp_lock); error = svc_vc_accept(xprt->xp_socket, &so); if (error == EWOULDBLOCK) { /* * We must re-test for new connections after taking * the lock to protect us in the case where a new * connection arrives after our call to accept fails * with EWOULDBLOCK. */ SOLISTEN_LOCK(xprt->xp_socket); if (TAILQ_EMPTY(&xprt->xp_socket->sol_comp)) xprt_inactive_self(xprt); SOLISTEN_UNLOCK(xprt->xp_socket); sx_xunlock(&xprt->xp_lock); return (FALSE); } if (error) { SOLISTEN_LOCK(xprt->xp_socket); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(xprt->xp_socket, SO_RCV); } SOLISTEN_UNLOCK(xprt->xp_socket); xprt_inactive_self(xprt); sx_xunlock(&xprt->xp_lock); return (FALSE); } sx_xunlock(&xprt->xp_lock); sa = NULL; error = soaccept(so, &sa); if (error) { /* * XXX not sure if I need to call sofree or soclose here. */ if (sa) free(sa, M_SONAME); return (FALSE); } /* * svc_vc_create_conn will call xprt_register - we don't need * to do anything with the new connection except derefence it. 
*/ new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa); if (!new_xprt) { soclose(so); } else { SVC_RELEASE(new_xprt); } free(sa, M_SONAME); return (FALSE); /* there is never an rpc msg to be processed */ } /*ARGSUSED*/ static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *xprt) { return (XPRT_IDLE); } static void svc_vc_destroy_common(SVCXPRT *xprt) { enum clnt_stat stat; uint32_t reterr; if (xprt->xp_socket) { if ((xprt->xp_tls & (RPCTLS_FLAGS_HANDSHAKE | RPCTLS_FLAGS_HANDSHFAIL)) != 0) { if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) { /* * If the upcall fails, the socket has * probably been closed via the rpctlssd * daemon having crashed or been * restarted, so just ignore returned stat. */ stat = rpctls_srv_disconnect(xprt->xp_sslsec, xprt->xp_sslusec, xprt->xp_sslrefno, &reterr); } /* Must sorele() to get rid of reference. */ CURVNET_SET(xprt->xp_socket->so_vnet); SOCK_LOCK(xprt->xp_socket); sorele(xprt->xp_socket); CURVNET_RESTORE(); } else (void)soclose(xprt->xp_socket); } if (xprt->xp_netid) (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1); svc_xprt_free(xprt); } static void svc_vc_rendezvous_destroy(SVCXPRT *xprt) { SOLISTEN_LOCK(xprt->xp_socket); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; solisten_upcall_set(xprt->xp_socket, NULL, NULL); } SOLISTEN_UNLOCK(xprt->xp_socket); svc_vc_destroy_common(xprt); } static void svc_vc_destroy(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1; CLIENT *cl = (CLIENT *)xprt->xp_p2; SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; if (xprt->xp_socket->so_rcv.sb_upcall != NULL) soupcall_clear(xprt->xp_socket, SO_RCV); } SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); if (cl != NULL) CLNT_RELEASE(cl); svc_vc_destroy_common(xprt); if (cd->mreq) m_freem(cd->mreq); if (cd->mpending) m_freem(cd->mpending); mem_free(cd, sizeof(*cd)); } static void svc_vc_backchannel_destroy(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1; struct mbuf *m, *m2; svc_xprt_free(xprt); m = cd->mreq; while (m != NULL) { m2 = m; m = m->m_nextpkt; m_freem(m2); } mem_free(cd, sizeof(*cd)); } /*ARGSUSED*/ static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static bool_t svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in) { return (FALSE); } static enum xprt_stat svc_vc_stat(SVCXPRT *xprt) { struct cf_conn *cd; cd = (struct cf_conn *)(xprt->xp_p1); if (cd->strm_stat == XPRT_DIED) return (XPRT_DIED); if (cd->mreq != NULL && cd->resid == 0 && cd->eor) return (XPRT_MOREREQS); if (soreadable(xprt->xp_socket)) return (XPRT_MOREREQS); return (XPRT_IDLE); } static bool_t svc_vc_ack(SVCXPRT *xprt, uint32_t *ack) { *ack = atomic_load_acq_32(&xprt->xp_snt_cnt); *ack -= sbused(&xprt->xp_socket->so_snd); return (TRUE); } static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *xprt) { struct cf_conn *cd; cd = (struct cf_conn *)(xprt->xp_p1); if (cd->mreq != NULL) return (XPRT_MOREREQS); return (XPRT_IDLE); } /* * If we have an mbuf chain in cd->mpending, try to parse a record from it, * leaving the result in cd->mreq. If we don't have a complete record, leave * the partial result in cd->mreq and try to read more from the socket. 
*/ static int svc_vc_process_pending(SVCXPRT *xprt) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct socket *so = xprt->xp_socket; struct mbuf *m; /* * If cd->resid is non-zero, we have part of the * record already, otherwise we are expecting a record * marker. */ if (!cd->resid && cd->mpending) { /* * See if there is enough data buffered to * make up a record marker. Make sure we can * handle the case where the record marker is * split across more than one mbuf. */ size_t n = 0; uint32_t header; m = cd->mpending; while (n < sizeof(uint32_t) && m) { n += m->m_len; m = m->m_next; } if (n < sizeof(uint32_t)) { so->so_rcv.sb_lowat = sizeof(uint32_t) - n; return (FALSE); } m_copydata(cd->mpending, 0, sizeof(header), (char *)&header); header = ntohl(header); cd->eor = (header & 0x80000000) != 0; cd->resid = header & 0x7fffffff; m_adj(cd->mpending, sizeof(uint32_t)); } /* * Start pulling off mbufs from cd->mpending * until we either have a complete record or * we run out of data. We use m_split to pull * data - it will pull as much as possible and * split the last mbuf if necessary. */ while (cd->mpending && cd->resid) { m = cd->mpending; if (cd->mpending->m_next || cd->mpending->m_len > cd->resid) cd->mpending = m_split(cd->mpending, cd->resid, M_WAITOK); else cd->mpending = NULL; if (cd->mreq) m_last(cd->mreq)->m_next = m; else cd->mreq = m; while (m) { cd->resid -= m->m_len; m = m->m_next; } } /* * Block receive upcalls if we have more data pending, * otherwise report our need. */ if (cd->mpending) so->so_rcv.sb_lowat = INT_MAX; else so->so_rcv.sb_lowat = imax(1, imin(cd->resid, so->so_rcv.sb_hiwat / 2)); return (TRUE); } static bool_t svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct uio uio; struct mbuf *m, *ctrl; struct socket* so = xprt->xp_socket; XDR xdrs; int error, rcvflag; uint32_t reterr, xid_plus_direction[2]; struct cmsghdr *cmsg; struct tls_get_record tgr; enum clnt_stat ret; /* * Serialise access to the socket and our own record parsing * state. */ sx_xlock(&xprt->xp_lock); for (;;) { /* If we have no request ready, check pending queue. */ while (cd->mpending && (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) { if (!svc_vc_process_pending(xprt)) break; } /* Process and return complete request in cd->mreq. */ if (cd->mreq != NULL && cd->resid == 0 && cd->eor) { /* * Now, check for a backchannel reply. * The XID is in the first uint32_t of the reply * and the message direction is the second one. */ if ((cd->mreq->m_len >= sizeof(xid_plus_direction) || m_length(cd->mreq, NULL) >= sizeof(xid_plus_direction)) && xprt->xp_p2 != NULL) { m_copydata(cd->mreq, 0, sizeof(xid_plus_direction), (char *)xid_plus_direction); xid_plus_direction[0] = ntohl(xid_plus_direction[0]); xid_plus_direction[1] = ntohl(xid_plus_direction[1]); /* Check message direction. */ if (xid_plus_direction[1] == REPLY) { clnt_bck_svccall(xprt->xp_p2, cd->mreq, xid_plus_direction[0]); cd->mreq = NULL; continue; } } xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE); cd->mreq = NULL; /* Check for next request in a pending queue. */ svc_vc_process_pending(xprt); if (cd->mreq == NULL || cd->resid != 0) { SOCKBUF_LOCK(&so->so_rcv); if (!soreadable(so)) xprt_inactive_self(xprt); SOCKBUF_UNLOCK(&so->so_rcv); } sx_xunlock(&xprt->xp_lock); if (! 
xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = NULL; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } /* * If receiving is disabled so that a TLS handshake can be * done by the rpctlssd daemon, return FALSE here. */ rcvflag = MSG_DONTWAIT; if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) rcvflag |= MSG_TLSAPPDATA; tryagain: if (xprt->xp_dontrcv) { sx_xunlock(&xprt->xp_lock); return (FALSE); } /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to * read as much as possible from the socket and put * the result in cd->mpending. If the read fails, * we have drained both cd->mpending and the socket so * we can call xprt_inactive(). */ uio.uio_resid = 1000000000; uio.uio_td = curthread; ctrl = m = NULL; error = soreceive(so, NULL, &uio, &m, &ctrl, &rcvflag); if (error == EWOULDBLOCK) { /* * We must re-test for readability after * taking the lock to protect us in the case * where a new packet arrives on the socket * after our call to soreceive fails with * EWOULDBLOCK. */ SOCKBUF_LOCK(&so->so_rcv); if (!soreadable(so)) xprt_inactive_self(xprt); SOCKBUF_UNLOCK(&so->so_rcv); sx_xunlock(&xprt->xp_lock); return (FALSE); } /* * A return of ENXIO indicates that there is a * non-application data record at the head of the * socket's receive queue, for TLS connections. * This record needs to be handled in userland * via an SSL_read() call, so do an upcall to the daemon. */ if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0 && error == ENXIO) { /* Disable reception. */ xprt->xp_dontrcv = TRUE; sx_xunlock(&xprt->xp_lock); ret = rpctls_srv_handlerecord(xprt->xp_sslsec, xprt->xp_sslusec, xprt->xp_sslrefno, &reterr); sx_xlock(&xprt->xp_lock); xprt->xp_dontrcv = FALSE; if (ret != RPC_SUCCESS || reterr != RPCTLSERR_OK) { /* * All we can do is soreceive() it and * then toss it. */ rcvflag = MSG_DONTWAIT; goto tryagain; } sx_xunlock(&xprt->xp_lock); xprt_active(xprt); /* Harmless if already active. */ return (FALSE); } if (error) { SOCKBUF_LOCK(&so->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(so, SO_RCV); } SOCKBUF_UNLOCK(&so->so_rcv); xprt_inactive_self(xprt); cd->strm_stat = XPRT_DIED; sx_xunlock(&xprt->xp_lock); return (FALSE); } if (!m) { /* * EOF - the other end has closed the socket. */ xprt_inactive_self(xprt); cd->strm_stat = XPRT_DIED; sx_xunlock(&xprt->xp_lock); return (FALSE); } /* Process any record header(s). */ if (ctrl != NULL) { cmsg = mtod(ctrl, struct cmsghdr *); if (cmsg->cmsg_type == TLS_GET_RECORD && cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) { memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr)); /* * This should have been handled by * the rpctls_svc_handlerecord() * upcall. If not, all we can do is * toss it away. 
*/ if (tgr.tls_type != TLS_RLTYPE_APP) { m_freem(m); m_free(ctrl); rcvflag = MSG_DONTWAIT | MSG_TLSAPPDATA; goto tryagain; } } m_free(ctrl); } if (cd->mpending) m_last(cd->mpending)->m_next = m; else cd->mpending = m; } } static bool_t svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct ct_data *ct; struct mbuf *m; XDR xdrs; sx_xlock(&xprt->xp_lock); ct = (struct ct_data *)xprt->xp_p2; if (ct == NULL) { sx_xunlock(&xprt->xp_lock); return (FALSE); } mtx_lock(&ct->ct_lock); m = cd->mreq; if (m == NULL) { xprt_inactive_self(xprt); mtx_unlock(&ct->ct_lock); sx_xunlock(&xprt->xp_lock); return (FALSE); } cd->mreq = m->m_nextpkt; mtx_unlock(&ct->ct_lock); sx_xunlock(&xprt->xp_lock); xdrmbuf_create(&xdrs, m, XDR_DECODE); if (! xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = NULL; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } static bool_t svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr *addr, struct mbuf *m, uint32_t *seq) { XDR xdrs; struct mbuf *mrep; bool_t stat = TRUE; int error, len, maxextsiz; #ifdef KERN_TLS u_int maxlen; #endif /* * Leave space for record mark. */ mrep = m_gethdr(M_WAITOK, MT_DATA); mrep->m_data += sizeof(uint32_t); xdrmbuf_create(&xdrs, mrep, XDR_ENCODE); if (msg->rm_reply.rp_stat == MSG_ACCEPTED && msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { if (!xdr_replymsg(&xdrs, msg)) stat = FALSE; else xdrmbuf_append(&xdrs, m); } else { stat = xdr_replymsg(&xdrs, msg); } if (stat) { m_fixhdr(mrep); /* * Prepend a record marker containing the reply length. */ M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK); len = mrep->m_pkthdr.len; *mtod(mrep, uint32_t *) = htonl(0x80000000 | (len - sizeof(uint32_t))); /* For RPC-over-TLS, copy mrep to a chain of ext_pgs. */ if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) { /* * Copy the mbuf chain to a chain of * ext_pgs mbuf(s) as required by KERN_TLS. */ maxextsiz = TLS_MAX_MSG_SIZE_V10_2; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) maxextsiz = min(maxextsiz, maxlen); #endif mrep = _rpc_copym_into_ext_pgs(mrep, maxextsiz); } atomic_add_32(&xprt->xp_snd_cnt, len); /* * sosend consumes mreq. */ error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL, 0, curthread); if (!error) { atomic_add_rel_32(&xprt->xp_snt_cnt, len); if (seq) *seq = xprt->xp_snd_cnt; stat = TRUE; } else atomic_subtract_32(&xprt->xp_snd_cnt, len); } else { m_freem(mrep); } XDR_DESTROY(&xdrs); return (stat); } static bool_t svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr *addr, struct mbuf *m, uint32_t *seq) { struct ct_data *ct; XDR xdrs; struct mbuf *mrep; bool_t stat = TRUE; int error, maxextsiz; #ifdef KERN_TLS u_int maxlen; #endif /* * Leave space for record mark. */ mrep = m_gethdr(M_WAITOK, MT_DATA); mrep->m_data += sizeof(uint32_t); xdrmbuf_create(&xdrs, mrep, XDR_ENCODE); if (msg->rm_reply.rp_stat == MSG_ACCEPTED && msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { if (!xdr_replymsg(&xdrs, msg)) stat = FALSE; else xdrmbuf_append(&xdrs, m); } else { stat = xdr_replymsg(&xdrs, msg); } if (stat) { m_fixhdr(mrep); /* * Prepend a record marker containing the reply length. */ M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK); *mtod(mrep, uint32_t *) = htonl(0x80000000 | (mrep->m_pkthdr.len - sizeof(uint32_t))); /* For RPC-over-TLS, copy mrep to a chain of ext_pgs. 
*/ if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) { /* * Copy the mbuf chain to a chain of * ext_pgs mbuf(s) as required by KERN_TLS. */ maxextsiz = TLS_MAX_MSG_SIZE_V10_2; #ifdef KERN_TLS if (rpctls_getinfo(&maxlen, false, false)) maxextsiz = min(maxextsiz, maxlen); #endif mrep = _rpc_copym_into_ext_pgs(mrep, maxextsiz); } sx_xlock(&xprt->xp_lock); ct = (struct ct_data *)xprt->xp_p2; if (ct != NULL) error = sosend(ct->ct_socket, NULL, NULL, mrep, NULL, 0, curthread); else error = EPIPE; sx_xunlock(&xprt->xp_lock); if (!error) { stat = TRUE; } } else { m_freem(mrep); } XDR_DESTROY(&xdrs); return (stat); } static bool_t svc_vc_null() { return (FALSE); } static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag) { SVCXPRT *xprt = (SVCXPRT *) arg; if (soreadable(xprt->xp_socket)) xprt_active(xprt); return (SU_OK); } static int svc_vc_rendezvous_soupcall(struct socket *head, void *arg, int waitflag) { SVCXPRT *xprt = (SVCXPRT *) arg; if (!TAILQ_EMPTY(&head->sol_comp)) xprt_active(xprt); return (SU_OK); } #if 0 /* * Get the effective UID of the sending process. Used by rpcbind, keyserv * and rpc.yppasswdd on AF_LOCAL. */ int __rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) { int sock, ret; gid_t egid; uid_t euid; struct sockaddr *sa; sock = transp->xp_fd; sa = (struct sockaddr *)transp->xp_rtaddr; if (sa->sa_family == AF_LOCAL) { ret = getpeereid(sock, &euid, &egid); if (ret == 0) *uid = euid; return (ret); } else return (-1); } #endif
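
A minimal sketch, not taken from the tree, of how a kernel-mode service might drive the interfaces declared in svc.h above: create a pool, register a dispatch routine, and follow the svc.h convention of calling svcerr_decode()/svcerr_noproc() on failure and svc_sendreply() on success, freeing the request when done. The program number, version, pool name, "netpath" nettype and the NULL sysctl anchor are assumptions for illustration; svc_getcred() is shown mapping AUTH_UNIX or RPCSEC_GSS credentials to a ucred as svc_auth.c describes.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ucred.h>
#include <rpc/rpc.h>

#define EXPROG	0x20000099		/* hypothetical program number */
#define EXVERS	1			/* hypothetical version */

static void
example_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
{
	struct ucred *cr;
	int flavor;

	switch (rqstp->rq_proc) {
	case 0:				/* NULLPROC */
		if (!svc_getargs(rqstp, (xdrproc_t)xdr_void, NULL)) {
			svcerr_decode(rqstp);
			break;
		}
		/* Credentials, if any, arrive "cooked" via svc_getcred(). */
		if (svc_getcred(rqstp, &cr, &flavor))
			crfree(cr);
		(void)svc_sendreply(rqstp, (xdrproc_t)xdr_void, NULL);
		break;
	default:
		svcerr_noproc(rqstp);	/* unknown procedure */
		break;
	}
	svc_freereq(rqstp);		/* the dispatcher owns the request */
}

static void
example_server(void)
{
	SVCPOOL *pool;

	pool = svcpool_create("example", NULL);	/* no sysctl tree attached */
	(void)svc_create(pool, example_dispatch, EXPROG, EXVERS, "netpath");
	svc_run(pool);			/* returns after svc_exit(pool) */
	svcpool_destroy(pool);
}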
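
The stream transport above frames each RPC message with a 4-byte record mark: the high bit flags the final fragment of a record and the low 31 bits carry the fragment length, which is what svc_vc_process_pending() decodes and svc_vc_reply() prepends. The following self-contained sketch of that framing uses userland byte-order helpers and hypothetical function names, purely to illustrate the convention:

#include <stdint.h>
#include <arpa/inet.h>

/* Build the 4-byte record mark for a fragment of 'len' bytes. */
static uint32_t
rm_encode(uint32_t len, int last_frag)
{
	return (htonl((last_frag ? 0x80000000u : 0u) | (len & 0x7fffffffu)));
}

/* Split a received record mark into fragment length and end-of-record flag. */
static void
rm_decode(uint32_t mark, uint32_t *lenp, int *eorp)
{
	mark = ntohl(mark);
	*eorp = (mark & 0x80000000u) != 0;
	*lenp = mark & 0x7fffffffu;
}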