Index: head/sys/netipsec/xform_ah.c
===================================================================
--- head/sys/netipsec/xform_ah.c	(revision 285335)
+++ head/sys/netipsec/xform_ah.c	(revision 285336)
@@ -1,1184 +1,1169 @@
/* $FreeBSD$ */
/* $OpenBSD: ip_ah.c,v 1.63 2001/06/26 06:18:58 angelos Exp $ */
/*-
 * The authors of this code are John Ioannidis (ji@tla.org),
 * Angelos D. Keromytis (kermit@csd.uch.gr) and
 * Niels Provos (provos@physnet.uni-hamburg.de).
 *
 * The original version of this code was written by John Ioannidis
 * for BSD/OS in Athens, Greece, in November 1995.
 *
 * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
 * by Angelos D. Keromytis.
 *
 * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
 * and Niels Provos.
 *
 * Additional features in 1999 by Angelos D. Keromytis and Niklas Hallqvist.
 *
 * Copyright (c) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
 * Angelos D. Keromytis and Niels Provos.
 * Copyright (c) 1999 Niklas Hallqvist.
 * Copyright (c) 2001 Angelos D. Keromytis.
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all copies of any software which is or includes a copy or
 * modification of this software.
 * You may use this code under the GNU public license if you so wish. Please
 * contribute changes back to the authors under this freer than GPL license
 * so that we may further the use of strong encryption without limitations to
 * all.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */
#include "opt_inet.h"
#include "opt_inet6.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef INET6
#include
#include
#include
#endif
#include
#include
#include

/*
 * Return header size in bytes. The old protocol did not support
 * the replay counter; the new protocol always includes the counter.
 */
#define HDRSIZE(sav) \
	(((sav)->flags & SADB_X_EXT_OLD) ? \
	sizeof (struct ah) : sizeof (struct ah) + sizeof (u_int32_t))
/*
- * Return authenticator size in bytes. The old protocol is known
- * to use a fixed 16-byte authenticator. The new algorithm use 12-byte
- * authenticator.
+ * Return authenticator size in bytes, based on a field in the
+ * algorithm descriptor.
 */
-#define AUTHSIZE(sav)	ah_authsize(sav)
+#define AUTHSIZE(sav) \
+	((sav->flags & SADB_X_EXT_OLD) ? 16 : (sav)->tdb_authalgxform->hashsize)

VNET_DEFINE(int, ah_enable) = 1;	/* control flow of packets with AH */
VNET_DEFINE(int, ah_cleartos) = 1;	/* clear ip_tos when doing AH calc */
VNET_PCPUSTAT_DEFINE(struct ahstat, ahstat);
VNET_PCPUSTAT_SYSINIT(ahstat);

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ahstat);
#endif /* VIMAGE */

#ifdef INET
SYSCTL_DECL(_net_inet_ah);
SYSCTL_INT(_net_inet_ah, OID_AUTO, ah_enable,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ah_enable), 0, "");
SYSCTL_INT(_net_inet_ah, OID_AUTO, ah_cleartos,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ah_cleartos), 0, "");
SYSCTL_VNET_PCPUSTAT(_net_inet_ah, IPSECCTL_STATS, stats, struct ahstat,
	ahstat, "AH statistics (struct ahstat, netipsec/ah_var.h)");
#endif

static unsigned char ipseczeroes[256];	/* larger than an ip6 extension hdr */

static int ah_input_cb(struct cryptop*);
static int ah_output_cb(struct cryptop*);

-static int
-ah_authsize(struct secasvar *sav)
-{
-
-	IPSEC_ASSERT(sav != NULL, ("%s: sav == NULL", __func__));
-
-	if (sav->flags & SADB_X_EXT_OLD)
-		return 16;
-
-	switch (sav->alg_auth) {
-	case SADB_X_AALG_SHA2_256:
-		return 16;
-	case SADB_X_AALG_SHA2_384:
-		return 24;
-	case SADB_X_AALG_SHA2_512:
-		return 32;
-	default:
-		return AH_HMAC_HASHLEN;
-	}
-	/* NOTREACHED */
-}

/*
 * NB: this is public for use by the PF_KEY support.
 */
struct auth_hash *
ah_algorithm_lookup(int alg)
{
	if (alg > SADB_AALG_MAX)
		return NULL;
	switch (alg) {
	case SADB_X_AALG_NULL:
		return &auth_hash_null;
	case SADB_AALG_MD5HMAC:
		return &auth_hash_hmac_md5;
	case SADB_AALG_SHA1HMAC:
		return &auth_hash_hmac_sha1;
	case SADB_X_AALG_RIPEMD160HMAC:
		return &auth_hash_hmac_ripemd_160;
	case SADB_X_AALG_MD5:
		return &auth_hash_key_md5;
	case SADB_X_AALG_SHA:
		return &auth_hash_key_sha1;
	case SADB_X_AALG_SHA2_256:
		return &auth_hash_hmac_sha2_256;
	case SADB_X_AALG_SHA2_384:
		return &auth_hash_hmac_sha2_384;
	case SADB_X_AALG_SHA2_512:
		return &auth_hash_hmac_sha2_512;
+	case SADB_X_AALG_AES128GMAC:
+		return &auth_hash_nist_gmac_aes_128;
+	case SADB_X_AALG_AES192GMAC:
+		return &auth_hash_nist_gmac_aes_192;
+	case SADB_X_AALG_AES256GMAC:
+		return &auth_hash_nist_gmac_aes_256;
	}
	return NULL;
}

size_t
ah_hdrsiz(struct secasvar *sav)
{
	size_t size;

	if (sav != NULL) {
		int authsize;
		IPSEC_ASSERT(sav->tdb_authalgxform != NULL, ("null xform"));
		/*XXX not right for null algorithm--does it matter??*/
		authsize = AUTHSIZE(sav);
		size = roundup(authsize, sizeof (u_int32_t)) + HDRSIZE(sav);
	} else {
		/* default guess */
		size = sizeof (struct ah) + sizeof (u_int32_t) + 16;
	}
	return size;
}

/*
 * NB: public for use by esp_init.
 */
int
ah_init0(struct secasvar *sav, struct xformsw *xsp, struct cryptoini *cria)
{
	struct auth_hash *thash;
	int keylen;

	thash = ah_algorithm_lookup(sav->alg_auth);
	if (thash == NULL) {
		DPRINTF(("%s: unsupported authentication algorithm %u\n",
			__func__, sav->alg_auth));
		return EINVAL;
	}
	/*
	 * Verify the replay state block allocation is consistent with
	 * the protocol type. We check here so we can make assumptions
	 * later during protocol processing.
	 */
	/* NB: replay state is setup elsewhere (sigh) */
	if (((sav->flags&SADB_X_EXT_OLD) == 0) ^ (sav->replay != NULL)) {
		DPRINTF(("%s: replay state block inconsistency, "
			"%s algorithm %s replay state\n", __func__,
			(sav->flags & SADB_X_EXT_OLD) ? "old" : "new",
			sav->replay == NULL ?
"without" : "with")); return EINVAL; } if (sav->key_auth == NULL) { DPRINTF(("%s: no authentication key for %s algorithm\n", __func__, thash->name)); return EINVAL; } keylen = _KEYLEN(sav->key_auth); if (keylen != thash->keysize && thash->keysize != 0) { DPRINTF(("%s: invalid keylength %d, algorithm %s requires " "keysize %d\n", __func__, keylen, thash->name, thash->keysize)); return EINVAL; } sav->tdb_xform = xsp; sav->tdb_authalgxform = thash; /* Initialize crypto session. */ bzero(cria, sizeof (*cria)); cria->cri_alg = sav->tdb_authalgxform->type; cria->cri_klen = _KEYBITS(sav->key_auth); cria->cri_key = sav->key_auth->key_data; cria->cri_mlen = AUTHSIZE(sav); return 0; } /* * ah_init() is called when an SPI is being set up. */ static int ah_init(struct secasvar *sav, struct xformsw *xsp) { struct cryptoini cria; int error; error = ah_init0(sav, xsp, &cria); return error ? error : crypto_newsession(&sav->tdb_cryptoid, &cria, V_crypto_support); } /* * Paranoia. * * NB: public for use by esp_zeroize (XXX). */ int ah_zeroize(struct secasvar *sav) { int err; if (sav->key_auth) bzero(sav->key_auth->key_data, _KEYLEN(sav->key_auth)); err = crypto_freesession(sav->tdb_cryptoid); sav->tdb_cryptoid = 0; sav->tdb_authalgxform = NULL; sav->tdb_xform = NULL; return err; } /* * Massage IPv4/IPv6 headers for AH processing. */ static int ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out) { struct mbuf *m = *m0; unsigned char *ptr; int off, count; #ifdef INET struct ip *ip; #endif /* INET */ #ifdef INET6 struct ip6_ext *ip6e; struct ip6_hdr ip6; int alloc, len, ad; #endif /* INET6 */ switch (proto) { #ifdef INET case AF_INET: /* * This is the least painful way of dealing with IPv4 header * and option processing -- just make sure they're in * contiguous memory. */ *m0 = m = m_pullup(m, skip); if (m == NULL) { DPRINTF(("%s: m_pullup failed\n", __func__)); return ENOBUFS; } /* Fix the IP header */ ip = mtod(m, struct ip *); if (V_ah_cleartos) ip->ip_tos = 0; ip->ip_ttl = 0; ip->ip_sum = 0; if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off &= htons(IP_DF); else ip->ip_off = htons(0); ptr = mtod(m, unsigned char *) + sizeof(struct ip); /* IPv4 option processing */ for (off = sizeof(struct ip); off < skip;) { if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP || off + 1 < skip) ; else { DPRINTF(("%s: illegal IPv4 option length for " "option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } switch (ptr[off]) { case IPOPT_EOL: off = skip; /* End the loop. */ break; case IPOPT_NOP: off++; break; case IPOPT_SECURITY: /* 0x82 */ case 0x85: /* Extended security. */ case 0x86: /* Commercial security. */ case 0x94: /* Router alert */ case 0x95: /* RFC1770 */ /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } off += ptr[off + 1]; break; case IPOPT_LSRR: case IPOPT_SSRR: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* * On output, if we have either of the * source routing options, we should * swap the destination address of the * IP header with the last address * specified in the option, as that is * what the destination's IP header * will look like. */ if (out) bcopy(ptr + off + ptr[off + 1] - sizeof(struct in_addr), &(ip->ip_dst), sizeof(struct in_addr)); /* Fall through */ default: /* Sanity check for option length. 
*/ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* Zeroize all other options. */ count = ptr[off + 1]; bcopy(ipseczeroes, ptr, count); off += count; break; } /* Sanity check. */ if (off > skip) { DPRINTF(("%s: malformed IPv4 options header\n", __func__)); m_freem(m); return EINVAL; } } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Ugly... */ /* Copy and "cook" the IPv6 header. */ m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6); /* We don't do IPv6 Jumbograms. */ if (ip6.ip6_plen == 0) { DPRINTF(("%s: unsupported IPv6 jumbogram\n", __func__)); m_freem(m); return EMSGSIZE; } ip6.ip6_flow = 0; ip6.ip6_hlim = 0; ip6.ip6_vfc &= ~IPV6_VERSION_MASK; ip6.ip6_vfc |= IPV6_VERSION; /* Scoped address handling. */ if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_src)) ip6.ip6_src.s6_addr16[1] = 0; if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_dst)) ip6.ip6_dst.s6_addr16[1] = 0; /* Done with IPv6 header. */ m_copyback(m, 0, sizeof(struct ip6_hdr), (caddr_t) &ip6); /* Let's deal with the remaining headers (if any). */ if (skip - sizeof(struct ip6_hdr) > 0) { if (m->m_len <= skip) { ptr = (unsigned char *) malloc( skip - sizeof(struct ip6_hdr), M_XDATA, M_NOWAIT); if (ptr == NULL) { DPRINTF(("%s: failed to allocate memory" "for IPv6 headers\n",__func__)); m_freem(m); return ENOBUFS; } /* * Copy all the protocol headers after * the IPv6 header. */ m_copydata(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); alloc = 1; } else { /* No need to allocate memory. */ ptr = mtod(m, unsigned char *) + sizeof(struct ip6_hdr); alloc = 0; } } else break; off = ip6.ip6_nxt & 0xff; /* Next header type. */ for (len = 0; len < skip - sizeof(struct ip6_hdr);) switch (off) { case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: ip6e = (struct ip6_ext *) (ptr + len); /* * Process the mutable/immutable * options -- borrows heavily from the * KAME code. */ for (count = len + sizeof(struct ip6_ext); count < len + ((ip6e->ip6e_len + 1) << 3);) { if (ptr[count] == IP6OPT_PAD1) { count++; continue; /* Skip padding. */ } /* Sanity check. */ if (count > len + ((ip6e->ip6e_len + 1) << 3)) { m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } ad = ptr[count + 1]; /* If mutable option, zeroize. */ if (ptr[count] & IP6OPT_MUTABLE) bcopy(ipseczeroes, ptr + count, ptr[count + 1]); count += ad; /* Sanity check. */ if (count > skip - sizeof(struct ip6_hdr)) { m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } } /* Advance. */ len += ((ip6e->ip6e_len + 1) << 3); off = ip6e->ip6e_nxt; break; case IPPROTO_ROUTING: /* * Always include routing headers in * computation. */ ip6e = (struct ip6_ext *) (ptr + len); len += ((ip6e->ip6e_len + 1) << 3); off = ip6e->ip6e_nxt; break; default: DPRINTF(("%s: unexpected IPv6 header type %d", __func__, off)); if (alloc) free(ptr, M_XDATA); m_freem(m); return EINVAL; } /* Copyback and free, if we allocated. */ if (alloc) { m_copyback(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); free(ptr, M_XDATA); } break; #endif /* INET6 */ } return 0; } /* * ah_input() gets called to verify that an input packet * passes authentication. 
*/ static int ah_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { char buf[128]; struct auth_hash *ahx; struct tdb_crypto *tc; struct newah *ah; int hl, rplen, authsize, error; struct cryptodesc *crda; struct cryptop *crp; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->key_auth != NULL, ("null authentication key")); IPSEC_ASSERT(sav->tdb_authalgxform != NULL, ("null authentication xform")); /* Figure out header size. */ rplen = HDRSIZE(sav); /* XXX don't pullup, just copy header */ IP6_EXTHDR_GET(ah, struct newah *, m, skip, rplen); if (ah == NULL) { DPRINTF(("ah_input: cannot pullup header\n")); AHSTAT_INC(ahs_hdrops); /*XXX*/ m_freem(m); return ENOBUFS; } /* Check replay window, if applicable. */ if (sav->replay && !ipsec_chkreplay(ntohl(ah->ah_seq), sav)) { AHSTAT_INC(ahs_replay); DPRINTF(("%s: packet replay failure: %s\n", __func__, ipsec_logsastr(sav, buf, sizeof(buf)))); m_freem(m); return ENOBUFS; } /* Verify AH header length. */ hl = ah->ah_len * sizeof (u_int32_t); ahx = sav->tdb_authalgxform; authsize = AUTHSIZE(sav); if (hl != authsize + rplen - sizeof (struct ah)) { DPRINTF(("%s: bad authenticator length %u (expecting %lu)" " for packet in SA %s/%08lx\n", __func__, hl, (u_long) (authsize + rplen - sizeof (struct ah)), ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_badauthl); m_freem(m); return EACCES; } AHSTAT_ADD(ahs_ibytes, m->m_pkthdr.len - skip - hl); /* Get crypto descriptors. */ crp = crypto_getreq(1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptor\n",__func__)); AHSTAT_INC(ahs_crypto); m_freem(m); return ENOBUFS; } crda = crp->crp_desc; IPSEC_ASSERT(crda != NULL, ("null crypto descriptor")); crda->crd_skip = 0; crda->crd_len = m->m_pkthdr.len; crda->crd_inject = skip + rplen; /* Authentication operation. */ crda->crd_alg = ahx->type; crda->crd_klen = _KEYBITS(sav->key_auth); crda->crd_key = sav->key_auth->key_data; /* Allocate IPsec-specific opaque crypto info. */ tc = (struct tdb_crypto *) malloc(sizeof (struct tdb_crypto) + skip + rplen + authsize, M_XDATA, M_NOWAIT | M_ZERO); if (tc == NULL) { DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__)); AHSTAT_INC(ahs_crypto); crypto_freereq(crp); m_freem(m); return ENOBUFS; } /* * Save the authenticator, the skipped portion of the packet, * and the AH header. */ m_copydata(m, 0, skip + rplen + authsize, (caddr_t)(tc+1)); /* Zeroize the authenticator on the packet. */ m_copyback(m, skip + rplen, authsize, ipseczeroes); /* "Massage" the packet headers for crypto processing. */ error = ah_massage_headers(&m, sav->sah->saidx.dst.sa.sa_family, skip, ahx->type, 0); if (error != 0) { /* NB: mbuf is free'd by ah_massage_headers */ AHSTAT_INC(ahs_hdrops); free(tc, M_XDATA); crypto_freereq(crp); return (error); } /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (caddr_t) m; crp->crp_callback = ah_input_cb; crp->crp_sid = sav->tdb_cryptoid; crp->crp_opaque = (caddr_t) tc; /* These are passed as-is to the callback. */ tc->tc_spi = sav->spi; tc->tc_dst = sav->sah->saidx.dst; tc->tc_proto = sav->sah->saidx.proto; tc->tc_nxt = ah->ah_nxt; tc->tc_protoff = protoff; tc->tc_skip = skip; KEY_ADDREFSA(sav); tc->tc_sav = sav; return (crypto_dispatch(crp)); } /* * AH input callback from the crypto driver. 
*/ static int ah_input_cb(struct cryptop *crp) { char buf[INET6_ADDRSTRLEN]; int rplen, error, skip, protoff; unsigned char calc[AH_ALEN_MAX]; struct mbuf *m; struct cryptodesc *crd; struct auth_hash *ahx; struct tdb_crypto *tc; struct secasvar *sav; struct secasindex *saidx; u_int8_t nxt; caddr_t ptr; int authsize; crd = crp->crp_desc; tc = (struct tdb_crypto *) crp->crp_opaque; IPSEC_ASSERT(tc != NULL, ("null opaque crypto data area!")); skip = tc->tc_skip; nxt = tc->tc_nxt; protoff = tc->tc_protoff; m = (struct mbuf *) crp->crp_buf; sav = tc->tc_sav; IPSEC_ASSERT(sav != NULL, ("null SA!")); saidx = &sav->sah->saidx; IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("unexpected protocol family %u", saidx->dst.sa.sa_family)); ahx = (struct auth_hash *) sav->tdb_authalgxform; /* Check for crypto errors. */ if (crp->crp_etype) { if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) return (crypto_dispatch(crp)); AHSTAT_INC(ahs_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } else { AHSTAT_INC(ahs_hist[sav->alg_auth]); crypto_freereq(crp); /* No longer needed. */ crp = NULL; } /* Shouldn't happen... */ if (m == NULL) { AHSTAT_INC(ahs_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } /* Figure out header size. */ rplen = HDRSIZE(sav); authsize = AUTHSIZE(sav); /* Copy authenticator off the packet. */ m_copydata(m, skip + rplen, authsize, calc); /* Verify authenticator. */ ptr = (caddr_t) (tc + 1); if (bcmp(ptr + skip + rplen, calc, authsize)) { DPRINTF(("%s: authentication hash mismatch for packet " "in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_badauth); error = EACCES; goto bad; } /* Fix the Next Protocol field. */ ((u_int8_t *) ptr)[protoff] = nxt; /* Copyback the saved (uncooked) network headers. */ m_copyback(m, 0, skip, ptr); free(tc, M_XDATA), tc = NULL; /* No longer needed */ /* * Header is now authenticated. */ m->m_flags |= M_AUTHIPHDR|M_AUTHIPDGM; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newah, ah_seq), sizeof (seq), (caddr_t) &seq); if (ipsec_updatereplay(ntohl(seq), sav)) { AHSTAT_INC(ahs_replay); error = ENOBUFS; /*XXX as above*/ goto bad; } } /* * Remove the AH header and authenticator from the mbuf. */ error = m_striphdr(m, skip, rplen + authsize); if (error) { DPRINTF(("%s: mangled mbuf chain for SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); goto bad; } switch (saidx->dst.sa.sa_family) { #ifdef INET6 case AF_INET6: error = ipsec6_common_input_cb(m, sav, skip, protoff); break; #endif #ifdef INET case AF_INET: error = ipsec4_common_input_cb(m, sav, skip, protoff); break; #endif default: panic("%s: Unexpected address family: %d saidx=%p", __func__, saidx->dst.sa.sa_family, saidx); } KEY_FREESAV(&sav); return error; bad: if (sav) KEY_FREESAV(&sav); if (m != NULL) m_freem(m); if (tc != NULL) free(tc, M_XDATA); if (crp != NULL) crypto_freereq(crp); return error; } /* * AH output routine, called by ipsec[46]_process_packet(). 
*/ static int ah_output(struct mbuf *m, struct ipsecrequest *isr, struct mbuf **mp, int skip, int protoff) { char buf[INET6_ADDRSTRLEN]; struct secasvar *sav; struct auth_hash *ahx; struct cryptodesc *crda; struct tdb_crypto *tc; struct mbuf *mi; struct cryptop *crp; u_int16_t iplen; int error, rplen, authsize, maxpacketsize, roff; u_int8_t prot; struct newah *ah; sav = isr->sav; IPSEC_ASSERT(sav != NULL, ("null SA")); ahx = sav->tdb_authalgxform; IPSEC_ASSERT(ahx != NULL, ("null authentication xform")); AHSTAT_INC(ahs_output); /* Figure out header size. */ rplen = HDRSIZE(sav); /* Check for maximum packet size violations. */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: DPRINTF(("%s: unknown/unsupported protocol family %u, " "SA %s/%08lx\n", __func__, sav->sah->saidx.dst.sa.sa_family, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_nopf); error = EPFNOSUPPORT; goto bad; } authsize = AUTHSIZE(sav); if (rplen + authsize + m->m_pkthdr.len > maxpacketsize) { DPRINTF(("%s: packet in SA %s/%08lx got too big " "(len %u, max len %u)\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi), rplen + authsize + m->m_pkthdr.len, maxpacketsize)); AHSTAT_INC(ahs_toobig); error = EMSGSIZE; goto bad; } /* Update the counters. */ AHSTAT_ADD(ahs_obytes, m->m_pkthdr.len - skip); m = m_unshare(m, M_NOWAIT); if (m == NULL) { DPRINTF(("%s: cannot clone mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); error = ENOBUFS; goto bad; } /* Inject AH header. */ mi = m_makespace(m, skip, rplen + authsize, &roff); if (mi == NULL) { DPRINTF(("%s: failed to inject %u byte AH header for SA " "%s/%08lx\n", __func__, rplen + authsize, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); /*XXX differs from openbsd */ error = ENOBUFS; goto bad; } /* * The AH header is guaranteed by m_makespace() to be in * contiguous memory, at roff bytes offset into the returned mbuf. */ ah = (struct newah *)(mtod(mi, caddr_t) + roff); /* Initialize the AH header. */ m_copydata(m, protoff, sizeof(u_int8_t), (caddr_t) &ah->ah_nxt); ah->ah_len = (rplen + authsize - sizeof(struct ah)) / sizeof(u_int32_t); ah->ah_reserve = 0; ah->ah_spi = sav->spi; /* Zeroize authenticator. */ m_copyback(m, skip + rplen, authsize, ipseczeroes); /* Insert packet replay counter, as requested. */ if (sav->replay) { if (sav->replay->count == ~0 && (sav->flags & SADB_X_EXT_CYCSEQ) == 0) { DPRINTF(("%s: replay counter wrapped for SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_wrap); error = EINVAL; goto bad; } #ifdef REGRESSION /* Emulate replay attack when ipsec_replay is TRUE. */ if (!V_ipsec_replay) #endif sav->replay->count++; ah->ah_seq = htonl(sav->replay->count); } /* Get crypto descriptors. */ crp = crypto_getreq(1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); AHSTAT_INC(ahs_crypto); error = ENOBUFS; goto bad; } crda = crp->crp_desc; crda->crd_skip = 0; crda->crd_inject = skip + rplen; crda->crd_len = m->m_pkthdr.len; /* Authentication operation. 
*/ crda->crd_alg = ahx->type; crda->crd_key = sav->key_auth->key_data; crda->crd_klen = _KEYBITS(sav->key_auth); /* Allocate IPsec-specific opaque crypto info. */ tc = (struct tdb_crypto *) malloc( sizeof(struct tdb_crypto) + skip, M_XDATA, M_NOWAIT|M_ZERO); if (tc == NULL) { crypto_freereq(crp); DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__)); AHSTAT_INC(ahs_crypto); error = ENOBUFS; goto bad; } /* Save the skipped portion of the packet. */ m_copydata(m, 0, skip, (caddr_t) (tc + 1)); /* * Fix IP header length on the header used for * authentication. We don't need to fix the original * header length as it will be fixed by our caller. */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: bcopy(((caddr_t)(tc + 1)) + offsetof(struct ip, ip_len), (caddr_t) &iplen, sizeof(u_int16_t)); iplen = htons(ntohs(iplen) + rplen + authsize); m_copyback(m, offsetof(struct ip, ip_len), sizeof(u_int16_t), (caddr_t) &iplen); break; #endif /* INET */ #ifdef INET6 case AF_INET6: bcopy(((caddr_t)(tc + 1)) + offsetof(struct ip6_hdr, ip6_plen), (caddr_t) &iplen, sizeof(u_int16_t)); iplen = htons(ntohs(iplen) + rplen + authsize); m_copyback(m, offsetof(struct ip6_hdr, ip6_plen), sizeof(u_int16_t), (caddr_t) &iplen); break; #endif /* INET6 */ } /* Fix the Next Header field in saved header. */ ((u_int8_t *) (tc + 1))[protoff] = IPPROTO_AH; /* Update the Next Protocol field in the IP header. */ prot = IPPROTO_AH; m_copyback(m, protoff, sizeof(u_int8_t), (caddr_t) &prot); /* "Massage" the packet headers for crypto processing. */ error = ah_massage_headers(&m, sav->sah->saidx.dst.sa.sa_family, skip, ahx->type, 1); if (error != 0) { m = NULL; /* mbuf was free'd by ah_massage_headers. */ free(tc, M_XDATA); crypto_freereq(crp); goto bad; } /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (caddr_t) m; crp->crp_callback = ah_output_cb; crp->crp_sid = sav->tdb_cryptoid; crp->crp_opaque = (caddr_t) tc; /* These are passed as-is to the callback. */ tc->tc_isr = isr; KEY_ADDREFSA(sav); tc->tc_sav = sav; tc->tc_spi = sav->spi; tc->tc_dst = sav->sah->saidx.dst; tc->tc_proto = sav->sah->saidx.proto; tc->tc_skip = skip; tc->tc_protoff = protoff; return crypto_dispatch(crp); bad: if (m) m_freem(m); return (error); } /* * AH output callback from the crypto driver. */ static int ah_output_cb(struct cryptop *crp) { int skip, protoff, error; struct tdb_crypto *tc; struct ipsecrequest *isr; struct secasvar *sav; struct mbuf *m; caddr_t ptr; tc = (struct tdb_crypto *) crp->crp_opaque; IPSEC_ASSERT(tc != NULL, ("null opaque data area!")); skip = tc->tc_skip; protoff = tc->tc_protoff; ptr = (caddr_t) (tc + 1); m = (struct mbuf *) crp->crp_buf; isr = tc->tc_isr; IPSEC_ASSERT(isr->sp != NULL, ("NULL isr->sp")); IPSECREQUEST_LOCK(isr); sav = tc->tc_sav; /* With the isr lock released SA pointer can be updated. */ if (sav != isr->sav) { AHSTAT_INC(ahs_notdb); DPRINTF(("%s: SA expired while in crypto\n", __func__)); error = ENOBUFS; /*XXX*/ goto bad; } /* Check for crypto errors. */ if (crp->crp_etype) { if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) { IPSECREQUEST_UNLOCK(isr); return (crypto_dispatch(crp)); } AHSTAT_INC(ahs_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... 
*/ if (m == NULL) { AHSTAT_INC(ahs_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } AHSTAT_INC(ahs_hist[sav->alg_auth]); /* * Copy original headers (with the new protocol number) back * in place. */ m_copyback(m, 0, skip, ptr); /* No longer needed. */ free(tc, M_XDATA); crypto_freereq(crp); #ifdef REGRESSION /* Emulate man-in-the-middle attack when ipsec_integrity is TRUE. */ if (V_ipsec_integrity) { int alen; /* * Corrupt HMAC if we want to test integrity verification of * the other side. */ alen = AUTHSIZE(sav); m_copyback(m, m->m_pkthdr.len - alen, alen, ipseczeroes); } #endif /* NB: m is reclaimed by ipsec_process_done. */ error = ipsec_process_done(m, isr); KEY_FREESAV(&sav); IPSECREQUEST_UNLOCK(isr); KEY_FREESP(&isr->sp); return (error); bad: if (sav) KEY_FREESAV(&sav); IPSECREQUEST_UNLOCK(isr); KEY_FREESP(&isr->sp); if (m) m_freem(m); free(tc, M_XDATA); crypto_freereq(crp); return (error); } static struct xformsw ah_xformsw = { XF_AH, XFT_AUTH, "IPsec AH", ah_init, ah_zeroize, ah_input, ah_output, }; static void ah_attach(void) { xform_register(&ah_xformsw); } SYSINIT(ah_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, ah_attach, NULL); Index: head/sys/netipsec/xform_esp.c =================================================================== --- head/sys/netipsec/xform_esp.c (revision 285335) +++ head/sys/netipsec/xform_esp.c (revision 285336) @@ -1,996 +1,1037 @@ /* $FreeBSD$ */ /* $OpenBSD: ip_esp.c,v 1.69 2001/06/26 06:18:59 angelos Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr) and * Niels Provos (provos@physnet.uni-hamburg.de). * * The original version of this code was written by John Ioannidis * for BSD/OS in Athens, Greece, in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * Copyright (c) 2001 Angelos D. Keromytis. * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #endif #include #include #include #include VNET_DEFINE(int, esp_enable) = 1; VNET_PCPUSTAT_DEFINE(struct espstat, espstat); VNET_PCPUSTAT_SYSINIT(espstat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(espstat); #endif /* VIMAGE */ SYSCTL_DECL(_net_inet_esp); SYSCTL_INT(_net_inet_esp, OID_AUTO, esp_enable, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(esp_enable), 0, ""); SYSCTL_VNET_PCPUSTAT(_net_inet_esp, IPSECCTL_STATS, stats, struct espstat, espstat, "ESP statistics (struct espstat, netipsec/esp_var.h"); static int esp_input_cb(struct cryptop *op); static int esp_output_cb(struct cryptop *crp); /* * NB: this is public for use by the PF_KEY support. * NB: if you add support here; be sure to add code to esp_attach below! */ struct enc_xform * esp_algorithm_lookup(int alg) { if (alg >= ESP_ALG_MAX) return NULL; switch (alg) { case SADB_EALG_DESCBC: return &enc_xform_des; case SADB_EALG_3DESCBC: return &enc_xform_3des; case SADB_X_EALG_AES: return &enc_xform_rijndael128; case SADB_X_EALG_BLOWFISHCBC: return &enc_xform_blf; case SADB_X_EALG_CAST128CBC: return &enc_xform_cast5; case SADB_X_EALG_SKIPJACK: return &enc_xform_skipjack; case SADB_EALG_NULL: return &enc_xform_null; case SADB_X_EALG_CAMELLIACBC: return &enc_xform_camellia; + case SADB_X_EALG_AESCTR: + return &enc_xform_aes_icm; + case SADB_X_EALG_AESGCM16: + return &enc_xform_aes_nist_gcm; + case SADB_X_EALG_AESGMAC: + return &enc_xform_aes_nist_gmac; } return NULL; } size_t esp_hdrsiz(struct secasvar *sav) { size_t size; if (sav != NULL) { /*XXX not right for null algorithm--does it matter??*/ IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("SA with null xform")); if (sav->flags & SADB_X_EXT_OLD) size = sizeof (struct esp); else size = sizeof (struct newesp); size += sav->tdb_encalgxform->blocksize + 9; /*XXX need alg check???*/ if (sav->tdb_authalgxform != NULL && sav->replay) size += ah_hdrsiz(sav); } else { /* * base header size * + max iv length for CBC mode * + max pad length * + sizeof (pad length field) * + sizeof (next header field) * + max icv supported. */ size = sizeof (struct newesp) + EALG_MAX_BLOCK_LEN + 9 + 16; } return size; } /* * esp_init() is called when an SPI is being set up. */ static int esp_init(struct secasvar *sav, struct xformsw *xsp) { struct enc_xform *txform; struct cryptoini cria, crie; int keylen; int error; txform = esp_algorithm_lookup(sav->alg_enc); if (txform == NULL) { DPRINTF(("%s: unsupported encryption algorithm %d\n", __func__, sav->alg_enc)); return EINVAL; } if (sav->key_enc == NULL) { DPRINTF(("%s: no encoding key for %s algorithm\n", __func__, txform->name)); return EINVAL; } if ((sav->flags&(SADB_X_EXT_OLD|SADB_X_EXT_IV4B)) == SADB_X_EXT_IV4B) { DPRINTF(("%s: 4-byte IV not supported with protocol\n", __func__)); return EINVAL; } keylen = _KEYLEN(sav->key_enc); if (txform->minkey > keylen || keylen > txform->maxkey) { DPRINTF(("%s: invalid key length %u, must be in the range " "[%u..%u] for algorithm %s\n", __func__, keylen, txform->minkey, txform->maxkey, txform->name)); return EINVAL; } /* * NB: The null xform needs a non-zero blocksize to keep the * crypto code happy but if we use it to set ivlen then * the ESP header will be processed incorrectly. The * compromise is to force it to zero here. 
*/ - sav->ivlen = (txform == &enc_xform_null ? 0 : txform->blocksize); + sav->ivlen = (txform == &enc_xform_null ? 0 : txform->ivsize); sav->iv = (caddr_t) malloc(sav->ivlen, M_XDATA, M_WAITOK); key_randomfill(sav->iv, sav->ivlen); /*XXX*/ /* * Setup AH-related state. */ if (sav->alg_auth != 0) { error = ah_init0(sav, xsp, &cria); if (error) return error; } /* NB: override anything set in ah_init0 */ sav->tdb_xform = xsp; sav->tdb_encalgxform = txform; + /* + * Whenever AES-GCM is used for encryption, one + * of the AES authentication algorithms is chosen + * as well, based on the key size. + */ + if (sav->alg_enc == SADB_X_EALG_AESGCM16) { + switch (keylen) { + case AES_128_HMAC_KEY_LEN: + sav->alg_auth = SADB_X_AALG_AES128GMAC; + sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_128; + break; + case AES_192_HMAC_KEY_LEN: + sav->alg_auth = SADB_X_AALG_AES192GMAC; + sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_192; + break; + case AES_256_HMAC_KEY_LEN: + sav->alg_auth = SADB_X_AALG_AES256GMAC; + sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_256; + break; + default: + DPRINTF(("%s: invalid key length %u" + "for algorithm %s\n", __func__, + keylen, txform->name)); + return EINVAL; + } + bzero(&cria, sizeof(cria)); + cria.cri_alg = sav->tdb_authalgxform->type; + cria.cri_klen = _KEYBITS(sav->key_enc) + 4; + cria.cri_key = sav->key_enc->key_data; + } + /* Initialize crypto session. */ bzero(&crie, sizeof (crie)); crie.cri_alg = sav->tdb_encalgxform->type; crie.cri_klen = _KEYBITS(sav->key_enc); crie.cri_key = sav->key_enc->key_data; + if (sav->alg_enc == SADB_X_EALG_AESGCM16) + arc4rand(crie.cri_iv, sav->ivlen, 0); + /* XXX Rounds ? */ if (sav->tdb_authalgxform && sav->tdb_encalgxform) { /* init both auth & enc */ crie.cri_next = &cria; error = crypto_newsession(&sav->tdb_cryptoid, &crie, V_crypto_support); } else if (sav->tdb_encalgxform) { error = crypto_newsession(&sav->tdb_cryptoid, &crie, V_crypto_support); } else if (sav->tdb_authalgxform) { error = crypto_newsession(&sav->tdb_cryptoid, &cria, V_crypto_support); } else { /* XXX cannot happen? */ DPRINTF(("%s: no encoding OR authentication xform!\n", __func__)); error = EINVAL; } return error; } /* * Paranoia. */ static int esp_zeroize(struct secasvar *sav) { /* NB: ah_zerorize free's the crypto session state */ int error = ah_zeroize(sav); if (sav->key_enc) bzero(sav->key_enc->key_data, _KEYLEN(sav->key_enc)); if (sav->iv) { free(sav->iv, M_XDATA); sav->iv = NULL; } sav->tdb_encalgxform = NULL; sav->tdb_xform = NULL; return error; } /* * ESP input processing, called (eventually) through the protocol switch. */ static int esp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { char buf[128]; struct auth_hash *esph; struct enc_xform *espx; struct tdb_crypto *tc; int plen, alen, hlen; struct newesp *esp; struct cryptodesc *crde; struct cryptop *crp; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("null encoding xform")); alen = 0; /* Valid IP Packet length ? 
*/ if ( (skip&3) || (m->m_pkthdr.len&3) ){ DPRINTF(("%s: misaligned packet, skip %u pkt len %u", __func__, skip, m->m_pkthdr.len)); ESPSTAT_INC(esps_badilen); m_freem(m); return EINVAL; } - /* XXX don't pullup, just copy header */ IP6_EXTHDR_GET(esp, struct newesp *, m, skip, sizeof (struct newesp)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Determine the ESP header length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; /* Authenticator hash size */ - if (esph != NULL) { - switch (esph->type) { - case CRYPTO_SHA2_256_HMAC: - case CRYPTO_SHA2_384_HMAC: - case CRYPTO_SHA2_512_HMAC: - alen = esph->hashsize/2; - break; - default: - alen = AH_HMAC_HASHLEN; - break; - } - } + alen = esph ? esph->hashsize : 0; /* * Verify payload length is multiple of encryption algorithm * block size. * * NB: This works for the null algorithm because the blocksize * is 4 and all packets must be 4-byte aligned regardless * of the algorithm. */ plen = m->m_pkthdr.len - (skip + hlen + alen); if ((plen & (espx->blocksize - 1)) || (plen <= 0)) { - DPRINTF(("%s: payload of %d octets not a multiple of %d octets," - " SA %s/%08lx\n", __func__, - plen, espx->blocksize, ipsec_address(&sav->sah->saidx.dst, - buf, sizeof(buf)), (u_long) ntohl(sav->spi))); - ESPSTAT_INC(esps_badilen); - m_freem(m); - return EINVAL; + if (!espx || sav->alg_enc != SADB_X_EALG_AESGCM16) { + DPRINTF(("%s: payload of %d octets not a multiple of %d octets," + " SA %s/%08lx\n", __func__, + plen, espx->blocksize, ipsec_address(&sav->sah->saidx.dst, + buf, sizeof(buf)), (u_long) ntohl(sav->spi))); + ESPSTAT_INC(esps_badilen); + m_freem(m); + return EINVAL; + } } /* * Check sequence number. */ if (esph != NULL && sav->replay != NULL && !ipsec_chkreplay(ntohl(esp->esp_seq), sav)) { DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_logsastr(sav, buf, sizeof(buf)))); /*XXX*/ ESPSTAT_INC(esps_replay); m_freem(m); return ENOBUFS; /*XXX*/ } /* Update the counters */ ESPSTAT_ADD(esps_ibytes, m->m_pkthdr.len - (skip + hlen + alen)); /* Get crypto descriptors */ crp = crypto_getreq(esph && espx ? 
2 : 1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); ESPSTAT_INC(esps_crypto); m_freem(m); return ENOBUFS; } /* Get IPsec-specific opaque pointer */ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto) + alen, M_XDATA, M_NOWAIT | M_ZERO); if (tc == NULL) { crypto_freereq(crp); DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__)); ESPSTAT_INC(esps_crypto); m_freem(m); return ENOBUFS; } if (esph != NULL) { struct cryptodesc *crda = crp->crp_desc; IPSEC_ASSERT(crda != NULL, ("null ah crypto descriptor")); /* Authentication descriptor */ crda->crd_skip = skip; - crda->crd_len = m->m_pkthdr.len - (skip + alen); + if (espx && espx->type == CRYPTO_AES_NIST_GCM_16) + crda->crd_len = hlen - sav->ivlen; + else + crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; crda->crd_alg = esph->type; - crda->crd_key = sav->key_auth->key_data; - crda->crd_klen = _KEYBITS(sav->key_auth); + if (espx && (espx->type == CRYPTO_AES_NIST_GCM_16)) { + crda->crd_key = sav->key_enc->key_data; + crda->crd_klen = _KEYBITS(sav->key_enc); + } else { + crda->crd_key = sav->key_auth->key_data; + crda->crd_klen = _KEYBITS(sav->key_auth); + } /* Copy the authenticator */ m_copydata(m, m->m_pkthdr.len - alen, alen, (caddr_t) (tc + 1)); /* Chain authentication request */ crde = crda->crd_next; } else { crde = crp->crp_desc; } /* Crypto operation descriptor */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (caddr_t) m; crp->crp_callback = esp_input_cb; crp->crp_sid = sav->tdb_cryptoid; crp->crp_opaque = (caddr_t) tc; /* These are passed as-is to the callback */ tc->tc_spi = sav->spi; tc->tc_dst = sav->sah->saidx.dst; tc->tc_proto = sav->sah->saidx.proto; tc->tc_protoff = protoff; tc->tc_skip = skip; KEY_ADDREFSA(sav); tc->tc_sav = sav; /* Decryption descriptor */ IPSEC_ASSERT(crde != NULL, ("null esp crypto descriptor")); crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_inject = skip + hlen - sav->ivlen; crde->crd_alg = espx->type; crde->crd_key = sav->key_enc->key_data; crde->crd_klen = _KEYBITS(sav->key_enc); + if (espx && (espx->type == CRYPTO_AES_NIST_GCM_16)) + crde->crd_flags |= CRD_F_IV_EXPLICIT; + /* XXX Rounds ? */ return (crypto_dispatch(crp)); } /* * ESP input callback from the crypto driver. 
*/ static int esp_input_cb(struct cryptop *crp) { char buf[128]; u_int8_t lastthree[3], aalg[AH_HMAC_MAXHASHLEN]; int hlen, skip, protoff, error, alen; struct mbuf *m; struct cryptodesc *crd; struct auth_hash *esph; struct enc_xform *espx; struct tdb_crypto *tc; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; crd = crp->crp_desc; IPSEC_ASSERT(crd != NULL, ("null crypto descriptor!")); tc = (struct tdb_crypto *) crp->crp_opaque; IPSEC_ASSERT(tc != NULL, ("null opaque crypto data area!")); skip = tc->tc_skip; protoff = tc->tc_protoff; m = (struct mbuf *) crp->crp_buf; sav = tc->tc_sav; IPSEC_ASSERT(sav != NULL, ("null SA!")); saidx = &sav->sah->saidx; IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("unexpected protocol family %u", saidx->dst.sa.sa_family)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Check for crypto errors */ if (crp->crp_etype) { /* Reset the session ID */ if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) return (crypto_dispatch(crp)); ESPSTAT_INC(esps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { ESPSTAT_INC(esps_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } ESPSTAT_INC(esps_hist[sav->alg_enc]); /* If authentication was performed, check now. */ if (esph != NULL) { - switch (esph->type) { - case CRYPTO_SHA2_256_HMAC: - case CRYPTO_SHA2_384_HMAC: - case CRYPTO_SHA2_512_HMAC: - alen = esph->hashsize/2; - break; - default: - alen = AH_HMAC_HASHLEN; - break; - } + alen = esph->hashsize; AHSTAT_INC(ahs_hist[sav->alg_auth]); /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - alen, alen, aalg); ptr = (caddr_t) (tc + 1); /* Verify authenticator */ if (bcmp(ptr, aalg, alen) != 0) { DPRINTF(("%s: authentication hash mismatch for " "packet in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_badauth); error = EACCES; goto bad; } /* Remove trailing authenticator */ m_adj(m, -alen); } /* Release the crypto descriptors */ free(tc, M_XDATA), tc = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newesp, esp_seq), sizeof (seq), (caddr_t) &seq); if (ipsec_updatereplay(ntohl(seq), sav)) { DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_logsastr(sav, buf, sizeof(buf)))); ESPSTAT_INC(esps_replay); error = ENOBUFS; goto bad; } } /* Determine the ESP header length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; /* Remove the ESP header and IV from the mbuf. 
*/ error = m_striphdr(m, skip, hlen); if (error) { ESPSTAT_INC(esps_hdrops); DPRINTF(("%s: bad mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); goto bad; } /* Save the last three bytes of decrypted data */ m_copydata(m, m->m_pkthdr.len - 3, 3, lastthree); /* Verify pad length */ if (lastthree[1] + 2 > m->m_pkthdr.len - skip) { ESPSTAT_INC(esps_badilen); DPRINTF(("%s: invalid padding length %d for %u byte packet " "in SA %s/%08lx\n", __func__, lastthree[1], m->m_pkthdr.len - skip, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = EINVAL; goto bad; } /* Verify correct decryption by checking the last padding bytes */ if ((sav->flags & SADB_X_EXT_PMASK) != SADB_X_EXT_PRAND) { if (lastthree[1] != lastthree[0] && lastthree[1] != 0) { ESPSTAT_INC(esps_badenc); DPRINTF(("%s: decryption failed for packet in " "SA %s/%08lx\n", __func__, ipsec_address( &sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = EINVAL; goto bad; } } /* Trim the mbuf chain to remove trailing authenticator and padding */ m_adj(m, -(lastthree[1] + 2)); /* Restore the Next Protocol field */ m_copyback(m, protoff, sizeof (u_int8_t), lastthree + 2); switch (saidx->dst.sa.sa_family) { #ifdef INET6 case AF_INET6: error = ipsec6_common_input_cb(m, sav, skip, protoff); break; #endif #ifdef INET case AF_INET: error = ipsec4_common_input_cb(m, sav, skip, protoff); break; #endif default: panic("%s: Unexpected address family: %d saidx=%p", __func__, saidx->dst.sa.sa_family, saidx); } KEY_FREESAV(&sav); return error; bad: if (sav) KEY_FREESAV(&sav); if (m != NULL) m_freem(m); if (tc != NULL) free(tc, M_XDATA); if (crp != NULL) crypto_freereq(crp); return error; } /* * ESP output routine, called by ipsec[46]_process_packet(). */ static int esp_output(struct mbuf *m, struct ipsecrequest *isr, struct mbuf **mp, int skip, int protoff) { char buf[INET6_ADDRSTRLEN]; struct enc_xform *espx; struct auth_hash *esph; int hlen, rlen, padding, blks, alen, i, roff; struct mbuf *mo = (struct mbuf *) NULL; struct tdb_crypto *tc; struct secasvar *sav; struct secasindex *saidx; unsigned char *pad; u_int8_t prot; int error, maxpacketsize; struct cryptodesc *crde = NULL, *crda = NULL; struct cryptop *crp; sav = isr->sav; IPSEC_ASSERT(sav != NULL, ("null SA")); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; IPSEC_ASSERT(espx != NULL, ("null encoding xform")); if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; rlen = m->m_pkthdr.len - skip; /* Raw payload length. */ /* * NB: The null encoding transform has a blocksize of 4 * so that headers are properly aligned. */ - blks = espx->blocksize; /* IV blocksize */ + blks = espx->ivsize; /* IV blocksize */ /* XXX clamp padding length a la KAME??? */ padding = ((blks - ((rlen + 2) % blks)) % blks) + 2; if (esph) - switch (esph->type) { - case CRYPTO_SHA2_256_HMAC: - case CRYPTO_SHA2_384_HMAC: - case CRYPTO_SHA2_512_HMAC: - alen = esph->hashsize/2; - break; - default: - alen = AH_HMAC_HASHLEN; - break; - } + alen = esph->hashsize; else alen = 0; ESPSTAT_INC(esps_output); saidx = &sav->sah->saidx; /* Check for maximum packet size violations. 
*/ switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: DPRINTF(("%s: unknown/unsupported protocol " "family %d, SA %s/%08lx\n", __func__, saidx->dst.sa.sa_family, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_nopf); error = EPFNOSUPPORT; goto bad; } + DPRINTF(("%s: skip %d hlen %d rlen %d padding %d alen %d blksd %d\n", + __func__, skip, hlen, rlen, padding, alen, blks)); if (skip + hlen + rlen + padding + alen > maxpacketsize) { DPRINTF(("%s: packet in SA %s/%08lx got too big " "(len %u, max len %u)\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi), skip + hlen + rlen + padding + alen, maxpacketsize)); ESPSTAT_INC(esps_toobig); error = EMSGSIZE; goto bad; } /* Update the counters. */ ESPSTAT_ADD(esps_obytes, m->m_pkthdr.len - skip); m = m_unshare(m, M_NOWAIT); if (m == NULL) { DPRINTF(("%s: cannot clone mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_hdrops); error = ENOBUFS; goto bad; } /* Inject ESP header. */ mo = m_makespace(m, skip, hlen, &roff); if (mo == NULL) { DPRINTF(("%s: %u byte ESP hdr inject failed for SA %s/%08lx\n", __func__, hlen, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_hdrops); /* XXX diffs from openbsd */ error = ENOBUFS; goto bad; } /* Initialize ESP header. */ bcopy((caddr_t) &sav->spi, mtod(mo, caddr_t) + roff, sizeof(u_int32_t)); if (sav->replay) { u_int32_t replay; #ifdef REGRESSION /* Emulate replay attack when ipsec_replay is TRUE. */ if (!V_ipsec_replay) #endif sav->replay->count++; replay = htonl(sav->replay->count); bcopy((caddr_t) &replay, mtod(mo, caddr_t) + roff + sizeof(u_int32_t), sizeof(u_int32_t)); } /* * Add padding -- better to do it ourselves than use the crypto engine, * although if/when we support compression, we'd have to do that. */ pad = (u_char *) m_pad(m, padding + alen); if (pad == NULL) { DPRINTF(("%s: m_pad failed for SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); m = NULL; /* NB: free'd by m_pad */ error = ENOBUFS; goto bad; } /* * Add padding: random, zero, or self-describing. * XXX catch unexpected setting */ switch (sav->flags & SADB_X_EXT_PMASK) { case SADB_X_EXT_PRAND: (void) read_random(pad, padding - 2); break; case SADB_X_EXT_PZERO: bzero(pad, padding - 2); break; case SADB_X_EXT_PSEQ: for (i = 0; i < padding - 2; i++) pad[i] = i+1; break; } /* Fix padding length and Next Protocol in padding itself. */ pad[padding - 2] = padding - 2; m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1); /* Fix Next Protocol in IPv4/IPv6 header. */ prot = IPPROTO_ESP; m_copyback(m, protoff, sizeof(u_int8_t), (u_char *) &prot); /* Get crypto descriptors. */ crp = crypto_getreq(esph && espx ? 2 : 1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); ESPSTAT_INC(esps_crypto); error = ENOBUFS; goto bad; } if (espx) { crde = crp->crp_desc; crda = crde->crd_next; /* Encryption descriptor. */ crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_flags = CRD_F_ENCRYPT; crde->crd_inject = skip + hlen - sav->ivlen; /* Encryption operation. 
*/ crde->crd_alg = espx->type; crde->crd_key = sav->key_enc->key_data; crde->crd_klen = _KEYBITS(sav->key_enc); + if (espx->type == CRYPTO_AES_NIST_GCM_16) + crde->crd_flags |= CRD_F_IV_EXPLICIT; /* XXX Rounds ? */ } else crda = crp->crp_desc; /* IPsec-specific opaque crypto info. */ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto), M_XDATA, M_NOWAIT|M_ZERO); if (tc == NULL) { crypto_freereq(crp); DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__)); ESPSTAT_INC(esps_crypto); error = ENOBUFS; goto bad; } /* Callback parameters */ tc->tc_isr = isr; KEY_ADDREFSA(sav); tc->tc_sav = sav; tc->tc_spi = sav->spi; tc->tc_dst = saidx->dst; tc->tc_proto = saidx->proto; /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (caddr_t) m; crp->crp_callback = esp_output_cb; crp->crp_opaque = (caddr_t) tc; crp->crp_sid = sav->tdb_cryptoid; if (esph) { /* Authentication descriptor. */ crda->crd_skip = skip; - crda->crd_len = m->m_pkthdr.len - (skip + alen); + if (espx && espx->type == CRYPTO_AES_NIST_GCM_16) + crda->crd_len = hlen - sav->ivlen; + else + crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; /* Authentication operation. */ crda->crd_alg = esph->type; - crda->crd_key = sav->key_auth->key_data; - crda->crd_klen = _KEYBITS(sav->key_auth); + if (espx && espx->type == CRYPTO_AES_NIST_GCM_16) { + crda->crd_key = sav->key_enc->key_data; + crda->crd_klen = _KEYBITS(sav->key_enc); + } else { + crda->crd_key = sav->key_auth->key_data; + crda->crd_klen = _KEYBITS(sav->key_auth); + } + } return crypto_dispatch(crp); bad: if (m) m_freem(m); return (error); } /* * ESP output callback from the crypto driver. */ static int esp_output_cb(struct cryptop *crp) { char buf[INET6_ADDRSTRLEN]; struct tdb_crypto *tc; struct ipsecrequest *isr; struct secasvar *sav; struct mbuf *m; int error; tc = (struct tdb_crypto *) crp->crp_opaque; IPSEC_ASSERT(tc != NULL, ("null opaque data area!")); m = (struct mbuf *) crp->crp_buf; isr = tc->tc_isr; IPSEC_ASSERT(isr->sp != NULL, ("NULL isr->sp")); IPSECREQUEST_LOCK(isr); sav = tc->tc_sav; /* With the isr lock released SA pointer can be updated. */ if (sav != isr->sav) { ESPSTAT_INC(esps_notdb); DPRINTF(("%s: SA gone during crypto (SA %s/%08lx proto %u)\n", __func__, ipsec_address(&tc->tc_dst, buf, sizeof(buf)), (u_long) ntohl(tc->tc_spi), tc->tc_proto)); error = ENOBUFS; /*XXX*/ goto bad; } /* Check for crypto errors. */ if (crp->crp_etype) { /* Reset session ID. */ if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) { IPSECREQUEST_UNLOCK(isr); return (crypto_dispatch(crp)); } ESPSTAT_INC(esps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { ESPSTAT_INC(esps_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } ESPSTAT_INC(esps_hist[sav->alg_enc]); if (sav->tdb_authalgxform != NULL) AHSTAT_INC(ahs_hist[sav->alg_auth]); /* Release crypto descriptors. */ free(tc, M_XDATA); crypto_freereq(crp); #ifdef REGRESSION /* Emulate man-in-the-middle attack when ipsec_integrity is TRUE. */ if (V_ipsec_integrity) { static unsigned char ipseczeroes[AH_HMAC_MAXHASHLEN]; struct auth_hash *esph; /* * Corrupt HMAC if we want to test integrity verification of * the other side. 
*/ esph = sav->tdb_authalgxform; if (esph != NULL) { int alen; switch (esph->type) { case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: alen = esph->hashsize/2; + break; + case CRYPTO_AES_128_GMAC: + case CRYPTO_AES_192_GMAC: + case CRYPTO_AES_256_GMAC: + alen = esph->hashsize; break; default: alen = AH_HMAC_HASHLEN; break; } m_copyback(m, m->m_pkthdr.len - alen, alen, ipseczeroes); } } #endif /* NB: m is reclaimed by ipsec_process_done. */ error = ipsec_process_done(m, isr); KEY_FREESAV(&sav); IPSECREQUEST_UNLOCK(isr); KEY_FREESP(&isr->sp); return (error); bad: if (sav) KEY_FREESAV(&sav); IPSECREQUEST_UNLOCK(isr); KEY_FREESP(&isr->sp); if (m) m_freem(m); free(tc, M_XDATA); crypto_freereq(crp); return (error); } static struct xformsw esp_xformsw = { XF_ESP, XFT_CONF|XFT_AUTH, "IPsec ESP", esp_init, esp_zeroize, esp_input, esp_output }; static void esp_attach(void) { xform_register(&esp_xformsw); } SYSINIT(esp_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, esp_attach, NULL); Index: head/sys/opencrypto/cryptodev.h =================================================================== --- head/sys/opencrypto/cryptodev.h (revision 285335) +++ head/sys/opencrypto/cryptodev.h (revision 285336) @@ -1,478 +1,525 @@ /* $FreeBSD$ */ /* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. * * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ #ifndef _CRYPTO_CRYPTO_H_ #define _CRYPTO_CRYPTO_H_ #include /* Some initial values */ #define CRYPTO_DRIVERS_INITIAL 4 #define CRYPTO_SW_SESSIONS 32 /* Hash values */ #define NULL_HASH_LEN 16 #define MD5_HASH_LEN 16 #define SHA1_HASH_LEN 20 #define RIPEMD160_HASH_LEN 20 #define SHA2_256_HASH_LEN 32 #define SHA2_384_HASH_LEN 48 #define SHA2_512_HASH_LEN 64 #define MD5_KPDK_HASH_LEN 16 #define SHA1_KPDK_HASH_LEN 20 +#define AES_HASH_LEN 16 /* Maximum hash algorithm result length */ #define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */ /* HMAC values */ #define NULL_HMAC_BLOCK_LEN 64 #define MD5_HMAC_BLOCK_LEN 64 #define SHA1_HMAC_BLOCK_LEN 64 #define RIPEMD160_HMAC_BLOCK_LEN 64 -#define SHA2_256_HMAC_BLOCK_LEN 64 -#define SHA2_384_HMAC_BLOCK_LEN 128 -#define SHA2_512_HMAC_BLOCK_LEN 128 +#define SHA2_256_HMAC_BLOCK_LEN 64 +#define SHA2_384_HMAC_BLOCK_LEN 128 +#define SHA2_512_HMAC_BLOCK_LEN 128 /* Maximum HMAC block length */ -#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */ +#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */ #define HMAC_IPAD_VAL 0x36 #define HMAC_OPAD_VAL 0x5C +/* HMAC Key Length */ +#define NULL_HMAC_KEY_LEN 0 +#define MD5_HMAC_KEY_LEN 16 +#define SHA1_HMAC_KEY_LEN 20 +#define RIPEMD160_HMAC_KEY_LEN 20 +#define SHA2_256_HMAC_KEY_LEN 32 +#define SHA2_384_HMAC_KEY_LEN 48 +#define SHA2_512_HMAC_KEY_LEN 64 +#define AES_128_HMAC_KEY_LEN 16 +#define AES_192_HMAC_KEY_LEN 24 +#define AES_256_HMAC_KEY_LEN 32 /* Encryption algorithm block sizes */ -#define NULL_BLOCK_LEN 4 -#define DES_BLOCK_LEN 8 -#define DES3_BLOCK_LEN 8 -#define BLOWFISH_BLOCK_LEN 8 -#define SKIPJACK_BLOCK_LEN 8 -#define CAST128_BLOCK_LEN 8 -#define RIJNDAEL128_BLOCK_LEN 16 -#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN -#define CAMELLIA_BLOCK_LEN 16 -#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */ +#define NULL_BLOCK_LEN 4 +#define DES_BLOCK_LEN 8 +#define DES3_BLOCK_LEN 8 +#define BLOWFISH_BLOCK_LEN 8 +#define SKIPJACK_BLOCK_LEN 8 +#define CAST128_BLOCK_LEN 8 +#define RIJNDAEL128_BLOCK_LEN 16 +#define AES_BLOCK_LEN 16 +#define AES_MIN_BLOCK_LEN 1 +#define ARC4_BLOCK_LEN 1 +#define CAMELLIA_BLOCK_LEN 16 +#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */ +/* IV Lengths */ + +#define ARC4_IV_LEN 1 +#define AES_IV_LEN 12 +#define AES_XTS_IV_LEN 8 +#define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ + +#define AES_CTR_NONCE_SIZE 4 + +/* Min and Max Encryption Key Sizes */ +#define NULL_MIN_KEY 0 +#define NULL_MAX_KEY 256 /* 2048 bits, max key */ +#define DES_MIN_KEY 8 +#define DES_MAX_KEY DES_MIN_KEY +#define TRIPLE_DES_MIN_KEY 24 +#define TRIPLE_DES_MAX_KEY TRIPLE_DES_MIN_KEY +#define BLOWFISH_MIN_KEY 5 +#define BLOWFISH_MAX_KEY 56 /* 448 bits, max key */ +#define CAST_MIN_KEY 5 +#define CAST_MAX_KEY 16 +#define 
SKIPJACK_MIN_KEY 10 +#define SKIPJACK_MAX_KEY SKIPJACK_MIN_KEY +#define RIJNDAEL_MIN_KEY 16 +#define RIJNDAEL_MAX_KEY 32 +#define AES_MIN_KEY 16 +#define AES_MAX_KEY 32 +#define AES_XTS_MIN_KEY 32 +#define AES_XTS_MAX_KEY 64 +#define ARC4_MIN_KEY 1 +#define ARC4_MAX_KEY 32 +#define CAMELLIA_MIN_KEY 8 +#define CAMELLIA_MAX_KEY 32 + /* Maximum hash algorithm result length */ -#define AALG_MAX_RESULT_LEN 64 /* Keep this updated */ +#define AALG_MAX_RESULT_LEN 64 /* Keep this updated */ #define CRYPTO_ALGORITHM_MIN 1 #define CRYPTO_DES_CBC 1 #define CRYPTO_3DES_CBC 2 #define CRYPTO_BLF_CBC 3 #define CRYPTO_CAST_CBC 4 #define CRYPTO_SKIPJACK_CBC 5 #define CRYPTO_MD5_HMAC 6 #define CRYPTO_SHA1_HMAC 7 #define CRYPTO_RIPEMD160_HMAC 8 #define CRYPTO_MD5_KPDK 9 #define CRYPTO_SHA1_KPDK 10 #define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */ #define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */ #define CRYPTO_ARC4 12 #define CRYPTO_MD5 13 #define CRYPTO_SHA1 14 #define CRYPTO_NULL_HMAC 15 #define CRYPTO_NULL_CBC 16 #define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */ #define CRYPTO_SHA2_256_HMAC 18 #define CRYPTO_SHA2_384_HMAC 19 #define CRYPTO_SHA2_512_HMAC 20 #define CRYPTO_CAMELLIA_CBC 21 #define CRYPTO_AES_XTS 22 #define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */ #define CRYPTO_AES_NIST_GMAC 24 /* cipher side */ #define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */ #define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */ #define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */ #define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */ #define CRYPTO_ALGORITHM_MAX 28 /* Keep updated - see below */ -#define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \ +#define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \ (x) <= CRYPTO_ALGORITHM_MAX) /* Algorithm flags */ #define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */ #define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */ #define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */ /* * Crypto driver/device flags. They can set in the crid * parameter when creating a session or submitting a key * op to affect the device/driver assigned. If neither * of these are specified then the crid is assumed to hold * the driver id of an existing (and suitable) device that * must be used to satisfy the request. */ #define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */ #define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */ /* NB: deprecated */ struct session_op { u_int32_t cipher; /* ie. CRYPTO_DES_CBC */ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */ u_int32_t keylen; /* cipher key */ caddr_t key; int mackeylen; /* mac key */ caddr_t mackey; u_int32_t ses; /* returns: session # */ }; struct session2_op { u_int32_t cipher; /* ie. CRYPTO_DES_CBC */ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */ u_int32_t keylen; /* cipher key */ caddr_t key; int mackeylen; /* mac key */ caddr_t mackey; u_int32_t ses; /* returns: session # */ int crid; /* driver id + flags (rw) */ int pad[4]; /* for future expansion */ }; struct crypt_op { u_int32_t ses; u_int16_t op; /* i.e. COP_ENCRYPT */ #define COP_ENCRYPT 1 #define COP_DECRYPT 2 u_int16_t flags; #define COP_F_BATCH 0x0008 /* Batch op if possible */ u_int len; caddr_t src, dst; /* become iov[] inside kernel */ caddr_t mac; /* must be big enough for chosen MAC */ caddr_t iv; }; /* op and flags the same as crypt_op */ struct crypt_aead { u_int32_t ses; u_int16_t op; /* i.e. 
COP_ENCRYPT */ u_int16_t flags; u_int len; u_int aadlen; u_int ivlen; caddr_t src, dst; /* become iov[] inside kernel */ caddr_t aad; /* additional authenticated data */ caddr_t tag; /* must fit for chosen TAG length */ caddr_t iv; }; /* * Parameters for looking up a crypto driver/device by * device name or by id. The latter are returned for * created sessions (crid) and completed key operations. */ struct crypt_find_op { int crid; /* driver id + flags */ char name[32]; /* device/driver name */ }; /* bignum parameter, in packed bytes, ... */ struct crparam { caddr_t crp_p; u_int crp_nbits; }; #define CRK_MAXPARAM 8 struct crypt_kop { u_int crk_op; /* ie. CRK_MOD_EXP or other */ u_int crk_status; /* return status */ u_short crk_iparams; /* # of input parameters */ u_short crk_oparams; /* # of output parameters */ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */ struct crparam crk_param[CRK_MAXPARAM]; }; #define CRK_ALGORITM_MIN 0 #define CRK_MOD_EXP 0 #define CRK_MOD_EXP_CRT 1 #define CRK_DSA_SIGN 2 #define CRK_DSA_VERIFY 3 #define CRK_DH_COMPUTE_KEY 4 #define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */ #define CRF_MOD_EXP (1 << CRK_MOD_EXP) #define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT) #define CRF_DSA_SIGN (1 << CRK_DSA_SIGN) #define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY) #define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY) /* * done against open of /dev/crypto, to get a cloned descriptor. * Please use F_SETFD against the cloned descriptor. */ #define CRIOGET _IOWR('c', 100, u_int32_t) #define CRIOASYMFEAT CIOCASYMFEAT #define CRIOFINDDEV CIOCFINDDEV /* the following are done against the cloned descriptor */ #define CIOCGSESSION _IOWR('c', 101, struct session_op) #define CIOCFSESSION _IOW('c', 102, u_int32_t) #define CIOCCRYPT _IOWR('c', 103, struct crypt_op) #define CIOCKEY _IOWR('c', 104, struct crypt_kop) #define CIOCASYMFEAT _IOR('c', 105, u_int32_t) #define CIOCGSESSION2 _IOWR('c', 106, struct session2_op) #define CIOCKEY2 _IOWR('c', 107, struct crypt_kop) #define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op) #define CIOCCRYPTAEAD _IOWR('c', 109, struct crypt_aead) struct cryptotstat { struct timespec acc; /* total accumulated time */ struct timespec min; /* min time */ struct timespec max; /* max time */ u_int32_t count; /* number of observations */ }; struct cryptostats { u_int32_t cs_ops; /* symmetric crypto ops submitted */ u_int32_t cs_errs; /* symmetric crypto ops that failed */ u_int32_t cs_kops; /* asymetric/key ops submitted */ u_int32_t cs_kerrs; /* asymetric/key ops that failed */ u_int32_t cs_intrs; /* crypto swi thread activations */ u_int32_t cs_rets; /* crypto return thread activations */ u_int32_t cs_blocks; /* symmetric op driver block */ u_int32_t cs_kblocks; /* symmetric op driver block */ /* * When CRYPTO_TIMING is defined at compile time and the * sysctl debug.crypto is set to 1, the crypto system will * accumulate statistics about how long it takes to process * crypto requests at various points during processing. 
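The ioctls declared above are the userland face of opencrypto. A minimal usage sketch (not part of the patch; key, IV and buffer contents are placeholders, error handling abbreviated) looks roughly like this:

#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct session_op sess;
	struct crypt_op cop;
	unsigned char key[16] = "0123456789abcdef";	/* placeholder key */
	unsigned char iv[16];
	unsigned char buf[16] = "plaintext block.";	/* one AES block */
	int fd, cfd;

	if ((fd = open("/dev/crypto", O_RDWR)) == -1)
		err(1, "open");
	if (ioctl(fd, CRIOGET, &cfd) == -1)		/* cloned descriptor */
		err(1, "CRIOGET");

	memset(&sess, 0, sizeof(sess));
	sess.cipher = CRYPTO_AES_CBC;
	sess.keylen = sizeof(key);
	sess.key = (caddr_t)key;
	if (ioctl(cfd, CIOCGSESSION, &sess) == -1)
		err(1, "CIOCGSESSION");

	memset(iv, 0, sizeof(iv));
	memset(&cop, 0, sizeof(cop));
	cop.ses = sess.ses;
	cop.op = COP_ENCRYPT;
	cop.len = sizeof(buf);			/* multiple of the block size */
	cop.src = cop.dst = (caddr_t)buf;	/* in-place operation */
	cop.iv = (caddr_t)iv;
	if (ioctl(cfd, CIOCCRYPT, &cop) == -1)
		err(1, "CIOCCRYPT");

	ioctl(cfd, CIOCFSESSION, &sess.ses);
	close(cfd);
	close(fd);
	return (0);
}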
*/ struct cryptotstat cs_invoke; /* crypto_dipsatch -> crypto_invoke */ struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */ struct cryptotstat cs_cb; /* crypto_done -> callback */ struct cryptotstat cs_finis; /* callback -> callback return */ }; #ifdef _KERNEL #if 0 #define CRYPTDEB(s) do { printf("%s:%d: %s\n", __FILE__, __LINE__, s); \ } while (0) #else #define CRYPTDEB(s) do { } while (0) #endif /* Standard initialization structure beginning */ struct cryptoini { int cri_alg; /* Algorithm to use */ int cri_klen; /* Key length, in bits */ int cri_mlen; /* Number of bytes we want from the entire hash. 0 means all. */ caddr_t cri_key; /* key to use */ u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */ struct cryptoini *cri_next; }; /* Describe boundaries of a single crypto operation */ struct cryptodesc { int crd_skip; /* How many bytes to ignore from start */ int crd_len; /* How many bytes to process */ int crd_inject; /* Where to inject results, if applicable */ int crd_flags; #define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */ #define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in place, so don't copy. */ #define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */ #define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */ #define CRD_F_COMP 0x0f /* Set when doing compression */ #define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */ struct cryptoini CRD_INI; /* Initialization/context data */ #define crd_esn CRD_INI.cri_esn #define crd_iv CRD_INI.cri_iv #define crd_key CRD_INI.cri_key #define crd_alg CRD_INI.cri_alg #define crd_klen CRD_INI.cri_klen struct cryptodesc *crd_next; }; /* Structure describing complete operation */ struct cryptop { TAILQ_ENTRY(cryptop) crp_next; u_int64_t crp_sid; /* Session ID */ int crp_ilen; /* Input data total length */ int crp_olen; /* Result total length */ int crp_etype; /* * Error type (zero means no error). * All error codes except EAGAIN * indicate possible data corruption (as in, * the data have been touched). On all * errors, the crp_sid may have changed * (reset to a new one), so the caller * should always check and use the new * value on future requests. */ int crp_flags; #define CRYPTO_F_IMBUF 0x0001 /* Input/output are mbuf chains */ #define CRYPTO_F_IOV 0x0002 /* Input/output are uio */ #define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */ #define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */ #define CRYPTO_F_DONE 0x0020 /* Operation completed */ #define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */ caddr_t crp_buf; /* Data to be processed */ caddr_t crp_opaque; /* Opaque pointer, passed along */ struct cryptodesc *crp_desc; /* Linked list of processing descriptors */ int (*crp_callback)(struct cryptop *); /* Callback function */ struct bintime crp_tstamp; /* performance time stamp */ }; #define CRYPTO_BUF_CONTIG 0x0 #define CRYPTO_BUF_IOV 0x1 #define CRYPTO_BUF_MBUF 0x2 #define CRYPTO_OP_DECRYPT 0x0 #define CRYPTO_OP_ENCRYPT 0x1 /* * Hints passed to process methods. */ #define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */ struct cryptkop { TAILQ_ENTRY(cryptkop) krp_next; u_int krp_op; /* ie. CRK_MOD_EXP or other */ u_int krp_status; /* return status */ u_short krp_iparams; /* # of input parameters */ u_short krp_oparams; /* # of output parameters */ u_int krp_crid; /* desired device, etc. */ u_int32_t krp_hid; struct crparam krp_param[CRK_MAXPARAM]; /* kvm */ int (*krp_callback)(struct cryptkop *); }; /* * Session ids are 64 bits. 
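On the kernel side, struct cryptop and struct cryptodesc are filled in by consumers such as the ESP code earlier in this patch. A minimal sketch of that pattern (illustrative only: the function and callback are hypothetical, the session id is assumed to come from an earlier crypto_newsession() call, and keys are taken from the session rather than passed per request):

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/mbuf.h>
#include <opencrypto/cryptodev.h>

/*
 * Sketch only: chain a cipher and an auth descriptor onto a request
 * obtained from crypto_getreq() and hand it to crypto_dispatch().
 * "skip" is the offset of the protected data in the mbuf chain,
 * "payload" its length and "alen" the ICV length appended after it.
 */
static int
example_dispatch(struct mbuf *m, u_int64_t sid, int skip, int payload,
    int alen, int (*cb)(struct cryptop *))
{
	struct cryptop *crp;
	struct cryptodesc *crde, *crda;

	crp = crypto_getreq(2);			/* cipher + auth descriptors */
	if (crp == NULL)
		return (ENOBUFS);
	crde = crp->crp_desc;
	crda = crde->crd_next;

	crde->crd_alg = CRYPTO_AES_CBC;
	crde->crd_skip = skip;
	crde->crd_len = payload;
	crde->crd_flags = CRD_F_ENCRYPT;

	crda->crd_alg = CRYPTO_SHA2_256_HMAC;
	crda->crd_skip = skip;
	crda->crd_len = payload;
	crda->crd_inject = skip + payload;	/* where the ICV is written */

	crp->crp_sid = sid;
	crp->crp_ilen = skip + payload + alen;
	crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
	crp->crp_buf = (caddr_t)m;
	crp->crp_callback = cb;
	crp->crp_opaque = NULL;
	return (crypto_dispatch(crp));
}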
The lower 32 bits contain a "local id" which * is a driver-private session identifier. The upper 32 bits contain a * "hardware id" used by the core crypto code to identify the driver and * a copy of the driver's capabilities that can be used by client code to * optimize operation. */ #define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff) #define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000) #define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff) MALLOC_DECLARE(M_CRYPTO_DATA); extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard); extern int crypto_freesession(u_int64_t sid); #define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE #define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE #define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */ extern int32_t crypto_get_driverid(device_t dev, int flags); extern int crypto_find_driver(const char *); extern device_t crypto_find_device_byhid(int hid); extern int crypto_getcaps(int hid); extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, u_int32_t flags); extern int crypto_kregister(u_int32_t, int, u_int32_t); extern int crypto_unregister(u_int32_t driverid, int alg); extern int crypto_unregister_all(u_int32_t driverid); extern int crypto_dispatch(struct cryptop *crp); extern int crypto_kdispatch(struct cryptkop *); #define CRYPTO_SYMQ 0x1 #define CRYPTO_ASYMQ 0x2 extern int crypto_unblock(u_int32_t, int); extern void crypto_done(struct cryptop *crp); extern void crypto_kdone(struct cryptkop *); extern int crypto_getfeat(int *); extern void crypto_freereq(struct cryptop *crp); extern struct cryptop *crypto_getreq(int num); extern int crypto_usercrypto; /* userland may do crypto requests */ extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */ extern int crypto_devallowsoft; /* only use hardware crypto */ /* * Crypto-related utility routines used mainly by drivers. * * XXX these don't really belong here; but for now they're * kept apart from the rest of the system. */ struct uio; extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp); extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp); extern int cuio_getptr(struct uio *uio, int loc, int *off); extern int cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int), void *arg); struct mbuf; struct iovec; extern int crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr, int *cnt, int *allocated); extern void crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in); extern void crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out); extern int crypto_apply(int flags, caddr_t buf, int off, int len, int (*f)(void *, void *, u_int), void *arg); #endif /* _KERNEL */ #endif /* _CRYPTO_CRYPTO_H_ */ Index: head/sys/opencrypto/xform.c =================================================================== --- head/sys/opencrypto/xform.c (revision 285335) +++ head/sys/opencrypto/xform.c (revision 285336) @@ -1,972 +1,984 @@ /* $OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr), * Niels Provos (provos@physnet.uni-hamburg.de) and * Damien Miller (djm@mindrot.org). * * This code was written by John Ioannidis for BSD/OS in Athens, Greece, * in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. 
* * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * AES XTS implementation in 2008 by Damien Miller * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * * Copyright (C) 2001, Angelos D. Keromytis. * * Copyright (C) 2008, Damien Miller * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int null_setkey(u_int8_t **, u_int8_t *, int); static int des1_setkey(u_int8_t **, u_int8_t *, int); static int des3_setkey(u_int8_t **, u_int8_t *, int); static int blf_setkey(u_int8_t **, u_int8_t *, int); static int cast5_setkey(u_int8_t **, u_int8_t *, int); static int skipjack_setkey(u_int8_t **, u_int8_t *, int); static int rijndael128_setkey(u_int8_t **, u_int8_t *, int); static int aes_icm_setkey(u_int8_t **, u_int8_t *, int); static int aes_xts_setkey(u_int8_t **, u_int8_t *, int); static int cml_setkey(u_int8_t **, u_int8_t *, int); static void null_encrypt(caddr_t, u_int8_t *); static void des1_encrypt(caddr_t, u_int8_t *); static void des3_encrypt(caddr_t, u_int8_t *); static void blf_encrypt(caddr_t, u_int8_t *); static void cast5_encrypt(caddr_t, u_int8_t *); static void skipjack_encrypt(caddr_t, u_int8_t *); static void rijndael128_encrypt(caddr_t, u_int8_t *); static void aes_xts_encrypt(caddr_t, u_int8_t *); static void cml_encrypt(caddr_t, u_int8_t *); static void null_decrypt(caddr_t, u_int8_t *); static void des1_decrypt(caddr_t, u_int8_t *); static void des3_decrypt(caddr_t, u_int8_t *); static void blf_decrypt(caddr_t, u_int8_t *); static void cast5_decrypt(caddr_t, u_int8_t *); static void skipjack_decrypt(caddr_t, u_int8_t *); static void rijndael128_decrypt(caddr_t, u_int8_t *); static void aes_xts_decrypt(caddr_t, u_int8_t *); static void cml_decrypt(caddr_t, u_int8_t *); static void aes_icm_crypt(caddr_t, u_int8_t *); static void null_zerokey(u_int8_t **); static void des1_zerokey(u_int8_t **); static void des3_zerokey(u_int8_t **); static void blf_zerokey(u_int8_t **); static void cast5_zerokey(u_int8_t **); static void skipjack_zerokey(u_int8_t **); static void rijndael128_zerokey(u_int8_t **); static void aes_icm_zerokey(u_int8_t **); static void aes_xts_zerokey(u_int8_t **); static void cml_zerokey(u_int8_t **); static void aes_icm_reinit(caddr_t, u_int8_t *); static void aes_xts_reinit(caddr_t, u_int8_t 
*); static void aes_gcm_reinit(caddr_t, u_int8_t *); static void null_init(void *); static void null_reinit(void *ctx, const u_int8_t *buf, u_int16_t len); static int null_update(void *, const u_int8_t *, u_int16_t); static void null_final(u_int8_t *, void *); static int MD5Update_int(void *, const u_int8_t *, u_int16_t); static void SHA1Init_int(void *); static int SHA1Update_int(void *, const u_int8_t *, u_int16_t); static void SHA1Final_int(u_int8_t *, void *); static int RMD160Update_int(void *, const u_int8_t *, u_int16_t); static int SHA256Update_int(void *, const u_int8_t *, u_int16_t); static int SHA384Update_int(void *, const u_int8_t *, u_int16_t); static int SHA512Update_int(void *, const u_int8_t *, u_int16_t); static u_int32_t deflate_compress(u_int8_t *, u_int32_t, u_int8_t **); static u_int32_t deflate_decompress(u_int8_t *, u_int32_t, u_int8_t **); #define AESICM_BLOCKSIZE 16 struct aes_icm_ctx { u_int32_t ac_ek[4*(RIJNDAEL_MAXNR + 1)]; /* ac_block is initalized to IV */ u_int8_t ac_block[AESICM_BLOCKSIZE]; int ac_nr; }; MALLOC_DEFINE(M_XDATA, "xform", "xform data buffers"); /* Encryption instances */ struct enc_xform enc_xform_null = { CRYPTO_NULL_CBC, "NULL", /* NB: blocksize of 4 is to generate a properly aligned ESP header */ - NULL_BLOCK_LEN, NULL_BLOCK_LEN, 0, 256, /* 2048 bits, max key */ + NULL_BLOCK_LEN, NULL_BLOCK_LEN, NULL_MIN_KEY, NULL_MAX_KEY, null_encrypt, null_decrypt, null_setkey, null_zerokey, NULL, }; struct enc_xform enc_xform_des = { CRYPTO_DES_CBC, "DES", - DES_BLOCK_LEN, DES_BLOCK_LEN, 8, 8, + DES_BLOCK_LEN, DES_BLOCK_LEN, DES_MIN_KEY, DES_MAX_KEY, des1_encrypt, des1_decrypt, des1_setkey, des1_zerokey, NULL, }; struct enc_xform enc_xform_3des = { CRYPTO_3DES_CBC, "3DES", - DES3_BLOCK_LEN, DES3_BLOCK_LEN, 24, 24, + DES3_BLOCK_LEN, DES3_BLOCK_LEN, TRIPLE_DES_MIN_KEY, + TRIPLE_DES_MAX_KEY, des3_encrypt, des3_decrypt, des3_setkey, des3_zerokey, NULL, }; struct enc_xform enc_xform_blf = { CRYPTO_BLF_CBC, "Blowfish", - BLOWFISH_BLOCK_LEN, BLOWFISH_BLOCK_LEN, 5, 56 /* 448 bits, max key */, + BLOWFISH_BLOCK_LEN, BLOWFISH_BLOCK_LEN, BLOWFISH_MIN_KEY, + BLOWFISH_MAX_KEY, blf_encrypt, blf_decrypt, blf_setkey, blf_zerokey, NULL, }; struct enc_xform enc_xform_cast5 = { CRYPTO_CAST_CBC, "CAST-128", - CAST128_BLOCK_LEN, CAST128_BLOCK_LEN, 5, 16, + CAST128_BLOCK_LEN, CAST128_BLOCK_LEN, CAST_MIN_KEY, CAST_MAX_KEY, cast5_encrypt, cast5_decrypt, cast5_setkey, cast5_zerokey, NULL, }; struct enc_xform enc_xform_skipjack = { CRYPTO_SKIPJACK_CBC, "Skipjack", - SKIPJACK_BLOCK_LEN, SKIPJACK_BLOCK_LEN, 10, 10, + SKIPJACK_BLOCK_LEN, SKIPJACK_BLOCK_LEN, SKIPJACK_MIN_KEY, + SKIPJACK_MAX_KEY, skipjack_encrypt, skipjack_decrypt, skipjack_setkey, skipjack_zerokey, NULL, }; struct enc_xform enc_xform_rijndael128 = { CRYPTO_RIJNDAEL128_CBC, "Rijndael-128/AES", - RIJNDAEL128_BLOCK_LEN, RIJNDAEL128_BLOCK_LEN, 16, 32, + RIJNDAEL128_BLOCK_LEN, RIJNDAEL128_BLOCK_LEN, RIJNDAEL_MIN_KEY, + RIJNDAEL_MAX_KEY, rijndael128_encrypt, rijndael128_decrypt, rijndael128_setkey, rijndael128_zerokey, NULL, }; struct enc_xform enc_xform_aes_icm = { CRYPTO_AES_ICM, "AES-ICM", - RIJNDAEL128_BLOCK_LEN, RIJNDAEL128_BLOCK_LEN, 16, 32, + RIJNDAEL128_BLOCK_LEN, RIJNDAEL128_BLOCK_LEN, AES_MIN_KEY, AES_MAX_KEY, aes_icm_crypt, aes_icm_crypt, aes_icm_setkey, rijndael128_zerokey, aes_icm_reinit, }; struct enc_xform enc_xform_aes_nist_gcm = { CRYPTO_AES_NIST_GCM_16, "AES-GCM", - 1, 12, 16, 32, + AES_MIN_BLOCK_LEN, AES_IV_LEN, AES_MIN_KEY, AES_MAX_KEY, aes_icm_crypt, aes_icm_crypt, aes_icm_setkey, aes_icm_zerokey, 
aes_gcm_reinit, }; struct enc_xform enc_xform_aes_nist_gmac = { CRYPTO_AES_NIST_GMAC, "AES-GMAC", - 1, 12, 16, 32, + AES_MIN_BLOCK_LEN, AES_IV_LEN, AES_MIN_KEY, AES_MAX_KEY, NULL, NULL, NULL, NULL, NULL, }; struct enc_xform enc_xform_aes_xts = { CRYPTO_AES_XTS, "AES-XTS", - RIJNDAEL128_BLOCK_LEN, 8, 32, 64, + AES_MIN_BLOCK_LEN, AES_XTS_IV_LEN, AES_XTS_MIN_KEY, AES_XTS_MAX_KEY, aes_xts_encrypt, aes_xts_decrypt, aes_xts_setkey, aes_xts_zerokey, aes_xts_reinit }; struct enc_xform enc_xform_arc4 = { CRYPTO_ARC4, "ARC4", - 1, 1, 1, 32, + ARC4_BLOCK_LEN, ARC4_IV_LEN, ARC4_MIN_KEY, ARC4_MAX_KEY, NULL, NULL, NULL, NULL, NULL, }; struct enc_xform enc_xform_camellia = { CRYPTO_CAMELLIA_CBC, "Camellia", - CAMELLIA_BLOCK_LEN, CAMELLIA_BLOCK_LEN, 8, 32, + CAMELLIA_BLOCK_LEN, CAMELLIA_BLOCK_LEN, CAMELLIA_MIN_KEY, + CAMELLIA_MAX_KEY, cml_encrypt, cml_decrypt, cml_setkey, cml_zerokey, NULL, }; /* Authentication instances */ struct auth_hash auth_hash_null = { /* NB: context isn't used */ CRYPTO_NULL_HMAC, "NULL-HMAC", - 0, NULL_HASH_LEN, sizeof(int), NULL_HMAC_BLOCK_LEN, + NULL_HMAC_KEY_LEN, NULL_HASH_LEN, sizeof(int), NULL_HMAC_BLOCK_LEN, null_init, null_reinit, null_reinit, null_update, null_final }; struct auth_hash auth_hash_hmac_md5 = { CRYPTO_MD5_HMAC, "HMAC-MD5", - 16, MD5_HASH_LEN, sizeof(MD5_CTX), MD5_HMAC_BLOCK_LEN, + MD5_HMAC_KEY_LEN, MD5_HASH_LEN, sizeof(MD5_CTX), MD5_HMAC_BLOCK_LEN, (void (*) (void *)) MD5Init, NULL, NULL, MD5Update_int, (void (*) (u_int8_t *, void *)) MD5Final }; struct auth_hash auth_hash_hmac_sha1 = { CRYPTO_SHA1_HMAC, "HMAC-SHA1", - 20, SHA1_HASH_LEN, sizeof(SHA1_CTX), SHA1_HMAC_BLOCK_LEN, + SHA1_HMAC_KEY_LEN, SHA1_HASH_LEN, sizeof(SHA1_CTX), SHA1_HMAC_BLOCK_LEN, SHA1Init_int, NULL, NULL, SHA1Update_int, SHA1Final_int }; struct auth_hash auth_hash_hmac_ripemd_160 = { CRYPTO_RIPEMD160_HMAC, "HMAC-RIPEMD-160", - 20, RIPEMD160_HASH_LEN, sizeof(RMD160_CTX), RIPEMD160_HMAC_BLOCK_LEN, + RIPEMD160_HMAC_KEY_LEN, RIPEMD160_HASH_LEN, sizeof(RMD160_CTX), + RIPEMD160_HMAC_BLOCK_LEN, (void (*)(void *)) RMD160Init, NULL, NULL, RMD160Update_int, (void (*)(u_int8_t *, void *)) RMD160Final }; struct auth_hash auth_hash_key_md5 = { CRYPTO_MD5_KPDK, "Keyed MD5", - 0, MD5_KPDK_HASH_LEN, sizeof(MD5_CTX), 0, + NULL_HMAC_KEY_LEN, MD5_KPDK_HASH_LEN, sizeof(MD5_CTX), 0, (void (*)(void *)) MD5Init, NULL, NULL, MD5Update_int, (void (*)(u_int8_t *, void *)) MD5Final }; struct auth_hash auth_hash_key_sha1 = { CRYPTO_SHA1_KPDK, "Keyed SHA1", - 0, SHA1_KPDK_HASH_LEN, sizeof(SHA1_CTX), 0, + NULL_HMAC_KEY_LEN, SHA1_KPDK_HASH_LEN, sizeof(SHA1_CTX), 0, SHA1Init_int, NULL, NULL, SHA1Update_int, SHA1Final_int }; struct auth_hash auth_hash_hmac_sha2_256 = { CRYPTO_SHA2_256_HMAC, "HMAC-SHA2-256", - 32, SHA2_256_HASH_LEN, sizeof(SHA256_CTX), SHA2_256_HMAC_BLOCK_LEN, + SHA2_256_HMAC_KEY_LEN, SHA2_256_HASH_LEN, sizeof(SHA256_CTX), + SHA2_256_HMAC_BLOCK_LEN, (void (*)(void *)) SHA256_Init, NULL, NULL, SHA256Update_int, (void (*)(u_int8_t *, void *)) SHA256_Final }; struct auth_hash auth_hash_hmac_sha2_384 = { CRYPTO_SHA2_384_HMAC, "HMAC-SHA2-384", - 48, SHA2_384_HASH_LEN, sizeof(SHA384_CTX), SHA2_384_HMAC_BLOCK_LEN, + SHA2_384_HMAC_KEY_LEN, SHA2_384_HASH_LEN, sizeof(SHA384_CTX), + SHA2_384_HMAC_BLOCK_LEN, (void (*)(void *)) SHA384_Init, NULL, NULL, SHA384Update_int, (void (*)(u_int8_t *, void *)) SHA384_Final }; struct auth_hash auth_hash_hmac_sha2_512 = { CRYPTO_SHA2_512_HMAC, "HMAC-SHA2-512", - 64, SHA2_512_HASH_LEN, sizeof(SHA512_CTX), SHA2_512_HMAC_BLOCK_LEN, + SHA2_512_HMAC_KEY_LEN, SHA2_512_HASH_LEN, 
sizeof(SHA512_CTX), + SHA2_512_HMAC_BLOCK_LEN, (void (*)(void *)) SHA512_Init, NULL, NULL, SHA512Update_int, (void (*)(u_int8_t *, void *)) SHA512_Final }; struct auth_hash auth_hash_nist_gmac_aes_128 = { CRYPTO_AES_128_NIST_GMAC, "GMAC-AES-128", - 16, 16, sizeof(struct aes_gmac_ctx), GMAC_BLOCK_LEN, + AES_128_HMAC_KEY_LEN, AES_HASH_LEN, sizeof(struct aes_gmac_ctx), + GMAC_BLOCK_LEN, (void (*)(void *)) AES_GMAC_Init, (void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Setkey, (void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Reinit, (int (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Update, (void (*)(u_int8_t *, void *)) AES_GMAC_Final }; struct auth_hash auth_hash_nist_gmac_aes_192 = { CRYPTO_AES_192_NIST_GMAC, "GMAC-AES-192", - 24, 16, sizeof(struct aes_gmac_ctx), GMAC_BLOCK_LEN, + AES_192_HMAC_KEY_LEN, AES_HASH_LEN, sizeof(struct aes_gmac_ctx), + GMAC_BLOCK_LEN, (void (*)(void *)) AES_GMAC_Init, (void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Setkey, (void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Reinit, (int (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Update, (void (*)(u_int8_t *, void *)) AES_GMAC_Final }; struct auth_hash auth_hash_nist_gmac_aes_256 = { CRYPTO_AES_256_NIST_GMAC, "GMAC-AES-256", - 32, 16, sizeof(struct aes_gmac_ctx), GMAC_BLOCK_LEN, + AES_256_HMAC_KEY_LEN, AES_HASH_LEN, sizeof(struct aes_gmac_ctx), + GMAC_BLOCK_LEN, (void (*)(void *)) AES_GMAC_Init, (void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Setkey, (void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Reinit, (int (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Update, (void (*)(u_int8_t *, void *)) AES_GMAC_Final }; /* Compression instance */ struct comp_algo comp_algo_deflate = { CRYPTO_DEFLATE_COMP, "Deflate", 90, deflate_compress, deflate_decompress }; /* * Encryption wrapper routines. 
*/ static void null_encrypt(caddr_t key, u_int8_t *blk) { } static void null_decrypt(caddr_t key, u_int8_t *blk) { } static int null_setkey(u_int8_t **sched, u_int8_t *key, int len) { *sched = NULL; return 0; } static void null_zerokey(u_int8_t **sched) { *sched = NULL; } static void des1_encrypt(caddr_t key, u_int8_t *blk) { des_cblock *cb = (des_cblock *) blk; des_key_schedule *p = (des_key_schedule *) key; des_ecb_encrypt(cb, cb, p[0], DES_ENCRYPT); } static void des1_decrypt(caddr_t key, u_int8_t *blk) { des_cblock *cb = (des_cblock *) blk; des_key_schedule *p = (des_key_schedule *) key; des_ecb_encrypt(cb, cb, p[0], DES_DECRYPT); } static int des1_setkey(u_int8_t **sched, u_int8_t *key, int len) { des_key_schedule *p; int err; p = malloc(sizeof (des_key_schedule), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (p != NULL) { des_set_key((des_cblock *) key, p[0]); err = 0; } else err = ENOMEM; *sched = (u_int8_t *) p; return err; } static void des1_zerokey(u_int8_t **sched) { bzero(*sched, sizeof (des_key_schedule)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } static void des3_encrypt(caddr_t key, u_int8_t *blk) { des_cblock *cb = (des_cblock *) blk; des_key_schedule *p = (des_key_schedule *) key; des_ecb3_encrypt(cb, cb, p[0], p[1], p[2], DES_ENCRYPT); } static void des3_decrypt(caddr_t key, u_int8_t *blk) { des_cblock *cb = (des_cblock *) blk; des_key_schedule *p = (des_key_schedule *) key; des_ecb3_encrypt(cb, cb, p[0], p[1], p[2], DES_DECRYPT); } static int des3_setkey(u_int8_t **sched, u_int8_t *key, int len) { des_key_schedule *p; int err; p = malloc(3*sizeof (des_key_schedule), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (p != NULL) { des_set_key((des_cblock *)(key + 0), p[0]); des_set_key((des_cblock *)(key + 8), p[1]); des_set_key((des_cblock *)(key + 16), p[2]); err = 0; } else err = ENOMEM; *sched = (u_int8_t *) p; return err; } static void des3_zerokey(u_int8_t **sched) { bzero(*sched, 3*sizeof (des_key_schedule)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } static void blf_encrypt(caddr_t key, u_int8_t *blk) { BF_LONG t[2]; memcpy(t, blk, sizeof (t)); t[0] = ntohl(t[0]); t[1] = ntohl(t[1]); /* NB: BF_encrypt expects the block in host order! */ BF_encrypt(t, (BF_KEY *) key); t[0] = htonl(t[0]); t[1] = htonl(t[1]); memcpy(blk, t, sizeof (t)); } static void blf_decrypt(caddr_t key, u_int8_t *blk) { BF_LONG t[2]; memcpy(t, blk, sizeof (t)); t[0] = ntohl(t[0]); t[1] = ntohl(t[1]); /* NB: BF_decrypt expects the block in host order! 
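These wrappers are only ever called through the enc_xform tables above. A consumer-side sketch of that contract (illustrative only; it assumes the field names declared for struct enc_xform in opencrypto/xform.h, and processes blocks independently, i.e. ECB-style, rather than chaining them as the software driver does):

#include <sys/param.h>
#include <sys/errno.h>
#include <opencrypto/xform.h>

/*
 * Sketch only: allocate a key schedule with setkey(), run the per-block
 * encrypt hook over the buffer, then wipe and free the schedule with
 * zerokey().  Key length is in bytes, as in the wrappers above.
 */
static int
encrypt_blocks(struct enc_xform *exf, u_int8_t *key, int klen,
    u_int8_t *buf, int len)
{
	u_int8_t *sched;
	int error, i;

	if (len % exf->blocksize != 0)
		return (EINVAL);
	error = exf->setkey(&sched, key, klen);
	if (error != 0)
		return (error);
	for (i = 0; i < len; i += exf->blocksize)
		exf->encrypt((caddr_t)sched, buf + i);
	exf->zerokey(&sched);		/* zeroes and frees the schedule */
	return (0);
}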
*/ BF_decrypt(t, (BF_KEY *) key); t[0] = htonl(t[0]); t[1] = htonl(t[1]); memcpy(blk, t, sizeof (t)); } static int blf_setkey(u_int8_t **sched, u_int8_t *key, int len) { int err; *sched = malloc(sizeof(BF_KEY), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (*sched != NULL) { BF_set_key((BF_KEY *) *sched, len, key); err = 0; } else err = ENOMEM; return err; } static void blf_zerokey(u_int8_t **sched) { bzero(*sched, sizeof(BF_KEY)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } static void cast5_encrypt(caddr_t key, u_int8_t *blk) { cast_encrypt((cast_key *) key, blk, blk); } static void cast5_decrypt(caddr_t key, u_int8_t *blk) { cast_decrypt((cast_key *) key, blk, blk); } static int cast5_setkey(u_int8_t **sched, u_int8_t *key, int len) { int err; *sched = malloc(sizeof(cast_key), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (*sched != NULL) { cast_setkey((cast_key *)*sched, key, len); err = 0; } else err = ENOMEM; return err; } static void cast5_zerokey(u_int8_t **sched) { bzero(*sched, sizeof(cast_key)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } static void skipjack_encrypt(caddr_t key, u_int8_t *blk) { skipjack_forwards(blk, blk, (u_int8_t **) key); } static void skipjack_decrypt(caddr_t key, u_int8_t *blk) { skipjack_backwards(blk, blk, (u_int8_t **) key); } static int skipjack_setkey(u_int8_t **sched, u_int8_t *key, int len) { int err; /* NB: allocate all the memory that's needed at once */ *sched = malloc(10 * (sizeof(u_int8_t *) + 0x100), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (*sched != NULL) { u_int8_t** key_tables = (u_int8_t**) *sched; u_int8_t* table = (u_int8_t*) &key_tables[10]; int k; for (k = 0; k < 10; k++) { key_tables[k] = table; table += 0x100; } subkey_table_gen(key, (u_int8_t **) *sched); err = 0; } else err = ENOMEM; return err; } static void skipjack_zerokey(u_int8_t **sched) { bzero(*sched, 10 * (sizeof(u_int8_t *) + 0x100)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } static void rijndael128_encrypt(caddr_t key, u_int8_t *blk) { rijndael_encrypt((rijndael_ctx *) key, (u_char *) blk, (u_char *) blk); } static void rijndael128_decrypt(caddr_t key, u_int8_t *blk) { rijndael_decrypt(((rijndael_ctx *) key), (u_char *) blk, (u_char *) blk); } static int rijndael128_setkey(u_int8_t **sched, u_int8_t *key, int len) { int err; if (len != 16 && len != 24 && len != 32) return (EINVAL); *sched = malloc(sizeof(rijndael_ctx), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (*sched != NULL) { rijndael_set_key((rijndael_ctx *) *sched, (u_char *) key, len * 8); err = 0; } else err = ENOMEM; return err; } static void rijndael128_zerokey(u_int8_t **sched) { bzero(*sched, sizeof(rijndael_ctx)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } void aes_icm_reinit(caddr_t key, u_int8_t *iv) { struct aes_icm_ctx *ctx; ctx = (struct aes_icm_ctx *)key; bcopy(iv, ctx->ac_block, AESICM_BLOCKSIZE); } void aes_gcm_reinit(caddr_t key, u_int8_t *iv) { struct aes_icm_ctx *ctx; aes_icm_reinit(key, iv); ctx = (struct aes_icm_ctx *)key; /* GCM starts with 2 as counter 1 is used for final xor of tag. 
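A worked example of the counter-block layout that aes_gcm_reinit() produces (not part of the patch; the helper name is illustrative): the first 12 bytes of the 16-byte ICM block carry the GCM nonce, the top three counter bytes are zeroed, and the low byte starts at 2 because counter value 1 is reserved for encrypting the tag.

#include <string.h>

#define GCM_NONCE_LEN	12
#define GCM_BLOCK_LEN	16

static void
gcm_initial_counter(const unsigned char nonce[GCM_NONCE_LEN],
    unsigned char block[GCM_BLOCK_LEN])
{
	memcpy(block, nonce, GCM_NONCE_LEN);	/* IV / nonce */
	memset(block + GCM_NONCE_LEN, 0, 3);	/* high counter bytes */
	block[GCM_BLOCK_LEN - 1] = 2;		/* first payload counter */
}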
*/ bzero(&ctx->ac_block[AESICM_BLOCKSIZE - 4], 4); ctx->ac_block[AESICM_BLOCKSIZE - 1] = 2; } void aes_icm_crypt(caddr_t key, u_int8_t *data) { struct aes_icm_ctx *ctx; u_int8_t keystream[AESICM_BLOCKSIZE]; int i; ctx = (struct aes_icm_ctx *)key; rijndaelEncrypt(ctx->ac_ek, ctx->ac_nr, ctx->ac_block, keystream); for (i = 0; i < AESICM_BLOCKSIZE; i++) data[i] ^= keystream[i]; explicit_bzero(keystream, sizeof(keystream)); /* increment counter */ for (i = AESICM_BLOCKSIZE - 1; i >= 0; i--) if (++ctx->ac_block[i]) /* continue on overflow */ break; } int aes_icm_setkey(u_int8_t **sched, u_int8_t *key, int len) { struct aes_icm_ctx *ctx; *sched = malloc(sizeof(struct aes_icm_ctx), M_CRYPTO_DATA, M_NOWAIT | M_ZERO); if (*sched == NULL) return ENOMEM; ctx = (struct aes_icm_ctx *)*sched; ctx->ac_nr = rijndaelKeySetupEnc(ctx->ac_ek, (u_char *)key, len * 8); if (ctx->ac_nr == 0) return EINVAL; return 0; } void aes_icm_zerokey(u_int8_t **sched) { bzero(*sched, sizeof(struct aes_icm_ctx)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } #define AES_XTS_BLOCKSIZE 16 #define AES_XTS_IVSIZE 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ struct aes_xts_ctx { rijndael_ctx key1; rijndael_ctx key2; u_int8_t tweak[AES_XTS_BLOCKSIZE]; }; void aes_xts_reinit(caddr_t key, u_int8_t *iv) { struct aes_xts_ctx *ctx = (struct aes_xts_ctx *)key; u_int64_t blocknum; u_int i; /* * Prepare tweak as E_k2(IV). IV is specified as LE representation * of a 64-bit block number which we allow to be passed in directly. */ bcopy(iv, &blocknum, AES_XTS_IVSIZE); for (i = 0; i < AES_XTS_IVSIZE; i++) { ctx->tweak[i] = blocknum & 0xff; blocknum >>= 8; } /* Last 64 bits of IV are always zero */ bzero(ctx->tweak + AES_XTS_IVSIZE, AES_XTS_IVSIZE); rijndael_encrypt(&ctx->key2, ctx->tweak, ctx->tweak); } static void aes_xts_crypt(struct aes_xts_ctx *ctx, u_int8_t *data, u_int do_encrypt) { u_int8_t block[AES_XTS_BLOCKSIZE]; u_int i, carry_in, carry_out; for (i = 0; i < AES_XTS_BLOCKSIZE; i++) block[i] = data[i] ^ ctx->tweak[i]; if (do_encrypt) rijndael_encrypt(&ctx->key1, block, data); else rijndael_decrypt(&ctx->key1, block, data); for (i = 0; i < AES_XTS_BLOCKSIZE; i++) data[i] ^= ctx->tweak[i]; /* Exponentiate tweak */ carry_in = 0; for (i = 0; i < AES_XTS_BLOCKSIZE; i++) { carry_out = ctx->tweak[i] & 0x80; ctx->tweak[i] = (ctx->tweak[i] << 1) | (carry_in ? 
1 : 0); carry_in = carry_out; } if (carry_in) ctx->tweak[0] ^= AES_XTS_ALPHA; bzero(block, sizeof(block)); } void aes_xts_encrypt(caddr_t key, u_int8_t *data) { aes_xts_crypt((struct aes_xts_ctx *)key, data, 1); } void aes_xts_decrypt(caddr_t key, u_int8_t *data) { aes_xts_crypt((struct aes_xts_ctx *)key, data, 0); } int aes_xts_setkey(u_int8_t **sched, u_int8_t *key, int len) { struct aes_xts_ctx *ctx; if (len != 32 && len != 64) return EINVAL; *sched = malloc(sizeof(struct aes_xts_ctx), M_CRYPTO_DATA, M_NOWAIT | M_ZERO); if (*sched == NULL) return ENOMEM; ctx = (struct aes_xts_ctx *)*sched; rijndael_set_key(&ctx->key1, key, len * 4); rijndael_set_key(&ctx->key2, key + (len / 2), len * 4); return 0; } void aes_xts_zerokey(u_int8_t **sched) { bzero(*sched, sizeof(struct aes_xts_ctx)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } static void cml_encrypt(caddr_t key, u_int8_t *blk) { camellia_encrypt((camellia_ctx *) key, (u_char *) blk, (u_char *) blk); } static void cml_decrypt(caddr_t key, u_int8_t *blk) { camellia_decrypt(((camellia_ctx *) key), (u_char *) blk, (u_char *) blk); } static int cml_setkey(u_int8_t **sched, u_int8_t *key, int len) { int err; if (len != 16 && len != 24 && len != 32) return (EINVAL); *sched = malloc(sizeof(camellia_ctx), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (*sched != NULL) { camellia_set_key((camellia_ctx *) *sched, (u_char *) key, len * 8); err = 0; } else err = ENOMEM; return err; } static void cml_zerokey(u_int8_t **sched) { bzero(*sched, sizeof(camellia_ctx)); free(*sched, M_CRYPTO_DATA); *sched = NULL; } /* * And now for auth. */ static void null_init(void *ctx) { } static void null_reinit(void *ctx, const u_int8_t *buf, u_int16_t len) { } static int null_update(void *ctx, const u_int8_t *buf, u_int16_t len) { return 0; } static void null_final(u_int8_t *buf, void *ctx) { if (buf != (u_int8_t *) 0) bzero(buf, 12); } static int RMD160Update_int(void *ctx, const u_int8_t *buf, u_int16_t len) { RMD160Update(ctx, buf, len); return 0; } static int MD5Update_int(void *ctx, const u_int8_t *buf, u_int16_t len) { MD5Update(ctx, buf, len); return 0; } static void SHA1Init_int(void *ctx) { SHA1Init(ctx); } static int SHA1Update_int(void *ctx, const u_int8_t *buf, u_int16_t len) { SHA1Update(ctx, buf, len); return 0; } static void SHA1Final_int(u_int8_t *blk, void *ctx) { SHA1Final(blk, ctx); } static int SHA256Update_int(void *ctx, const u_int8_t *buf, u_int16_t len) { SHA256_Update(ctx, buf, len); return 0; } static int SHA384Update_int(void *ctx, const u_int8_t *buf, u_int16_t len) { SHA384_Update(ctx, buf, len); return 0; } static int SHA512Update_int(void *ctx, const u_int8_t *buf, u_int16_t len) { SHA512_Update(ctx, buf, len); return 0; } /* * And compression */ static u_int32_t deflate_compress(data, size, out) u_int8_t *data; u_int32_t size; u_int8_t **out; { return deflate_global(data, size, 0, out); } static u_int32_t deflate_decompress(data, size, out) u_int8_t *data; u_int32_t size; u_int8_t **out; { return deflate_global(data, size, 1, out); }
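
The per-block tweak update in aes_xts_crypt() above is a multiplication by x in GF(2^128): a left shift of the 128-bit tweak (least-significant byte first) with the carried-out bit folded back in through the 0x87 reduction constant (AES_XTS_ALPHA, i.e. x^128 + x^7 + x^2 + x + 1). A standalone restatement of that step, for reference only:

#define XTS_BLOCK_LEN	16
#define XTS_ALPHA	0x87	/* GF(2^128) reduction constant */

static void
xts_tweak_double(unsigned char tweak[XTS_BLOCK_LEN])
{
	unsigned int i, carry_in, carry_out;

	carry_in = 0;
	for (i = 0; i < XTS_BLOCK_LEN; i++) {
		carry_out = tweak[i] & 0x80;	/* bit shifted out of this byte */
		tweak[i] = (tweak[i] << 1) | (carry_in ? 1 : 0);
		carry_in = carry_out;
	}
	if (carry_in)
		tweak[0] ^= XTS_ALPHA;		/* reduce modulo the polynomial */
}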