author    Werner Koch <wk@gnupg.org>  2015-01-16 14:55:03 +0100
committer Werner Koch <wk@gnupg.org>  2015-01-16 14:55:03 +0100
commit    067d7d8752d4d8a98f8e0e5e9b1a5b13e1b7ff9c
tree      1eab7affe5d24e919a22a5d4a29c8303342cf8db
parent    9d2a22c94ae99f9301321082c4fb8d73f4085fda
Add OCB cipher mode

* cipher/cipher-ocb.c: New.
* cipher/Makefile.am (libcipher_la_SOURCES): Add cipher-ocb.c.
* cipher/cipher-internal.h (OCB_BLOCK_LEN, OCB_L_TABLE_SIZE): New.
(gcry_cipher_handle): Add fields marks.finalize and u_mode.ocb.
* cipher/cipher.c (_gcry_cipher_open_internal): Add OCB mode.
(_gcry_cipher_open_internal): Setup default taglen of OCB.
(cipher_reset): Clear OCB specific data.
(cipher_encrypt, cipher_decrypt, _gcry_cipher_authenticate)
(_gcry_cipher_gettag, _gcry_cipher_checktag): Call OCB functions.
(_gcry_cipher_setiv): Add OCB specific nonce setting.
(_gcry_cipher_ctl): Add GCRYCTL_FINALIZE and GCRYCTL_SET_TAGLEN.
* src/gcrypt.h.in (GCRYCTL_SET_TAGLEN): New.
(gcry_cipher_final): New.
* cipher/bufhelp.h (buf_xor_1): New.
* tests/basic.c (hex2buffer): New.
(check_ocb_cipher): New.
(main): Call it here.  Add option --cipher-modes.
* tests/bench-slope.c (bench_aead_encrypt_do_bench): Call
gcry_cipher_final.
(bench_aead_decrypt_do_bench): Ditto.
(bench_aead_authenticate_do_bench): Ditto.  Check error code.
(bench_ocb_encrypt_do_bench): New.
(bench_ocb_decrypt_do_bench): New.
(bench_ocb_authenticate_do_bench): New.
(ocb_encrypt_ops): New.
(ocb_decrypt_ops): New.
(ocb_authenticate_ops): New.
(cipher_modes): Add them.
(cipher_bench_one): Skip wrong block length for OCB.
* tests/benchmark.c (cipher_bench): Add field noncelen to MODES.
Add OCB support.
--

See the comments on top of cipher/cipher-ocb.c for the patent status
of the OCB mode.

The implementation has not yet been optimized and as such is not
faster than the other AEAD modes.  A first candidate for optimization
is the double_block function.  Large improvements can be expected by
writing an AES ECB function to work on multiple blocks.

Signed-off-by: Werner Koch <wk@gnupg.org>
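
From the caller's perspective the new mode plugs into the usual AEAD
call sequence; a minimal sketch follows (ocb_seal is a hypothetical
helper, not part of the library, and it assumes a build in which the
public GCRY_CIPHER_MODE_OCB enum is already exposed by src/gcrypt.h.in):

    #include <gcrypt.h>

    /* Hypothetical helper: OCB-encrypt BUF in-place and return the
       16 byte tag.  NONCE may be 8 to 15 bytes long.  */
    static gcry_error_t
    ocb_seal (const unsigned char key[16],
              const unsigned char *nonce, size_t noncelen,
              const unsigned char *aad, size_t aadlen,
              unsigned char *buf, size_t buflen,
              unsigned char tag[16])
    {
      gcry_cipher_hd_t hd;
      gcry_error_t err;

      err = gcry_cipher_open (&hd, GCRY_CIPHER_AES128,
                              GCRY_CIPHER_MODE_OCB, 0);
      if (err)
        return err;
      if (!(err = gcry_cipher_setkey (hd, key, 16))
          && !(err = gcry_cipher_setiv (hd, nonce, noncelen))
          && !(err = gcry_cipher_authenticate (hd, aad, aadlen))
          /* Mark the next chunk as the final one; only then may its
             length be a non-multiple of the 16 byte block size.  */
          && !(err = gcry_cipher_final (hd))
          && !(err = gcry_cipher_encrypt (hd, buf, buflen, NULL, 0)))
        err = gcry_cipher_gettag (hd, tag, 16);
      gcry_cipher_close (hd);
      return err;
    }
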
Diffstat (limited to 'cipher')
-rw-r--r--  cipher/Makefile.am         3
-rw-r--r--  cipher/bufhelp.h          34
-rw-r--r--  cipher/cipher-internal.h  83
-rw-r--r--  cipher/cipher-ocb.c      495
-rw-r--r--  cipher/cipher.c           83
5 files changed, 694 insertions(+), 4 deletions(-)
diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index ceb95f19..4a9c86d8 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -41,7 +41,8 @@ libcipher_la_SOURCES = \
cipher.c cipher-internal.h \
cipher-cbc.c cipher-cfb.c cipher-ofb.c cipher-ctr.c cipher-aeswrap.c \
cipher-ccm.c cipher-cmac.c cipher-gcm.c cipher-gcm-intel-pclmul.c \
-cipher-poly1305.c cipher-selftest.c cipher-selftest.h \
+cipher-poly1305.c cipher-ocb.c \
+cipher-selftest.c cipher-selftest.h \
pubkey.c pubkey-internal.h pubkey-util.c \
md.c \
mac.c mac-internal.h \
diff --git a/cipher/bufhelp.h b/cipher/bufhelp.h
index 464b1416..a372acb4 100644
--- a/cipher/bufhelp.h
+++ b/cipher/bufhelp.h
@@ -120,6 +120,40 @@ do_bytes:
}
+/* Optimized function for in-place buffer xoring. */
+static inline void
+buf_xor_1(void *_dst, const void *_src, size_t len)
+{
+ byte *dst = _dst;
+ const byte *src = _src;
+ uintptr_t *ldst;
+ const uintptr_t *lsrc;
+#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
+ const unsigned int longmask = sizeof(uintptr_t) - 1;
+
+ /* Skip fast processing if buffers are unaligned. */
+ if (((uintptr_t)dst | (uintptr_t)src) & longmask)
+ goto do_bytes;
+#endif
+
+ ldst = (uintptr_t *)(void *)dst;
+ lsrc = (const uintptr_t *)(const void *)src;
+
+ for (; len >= sizeof(uintptr_t); len -= sizeof(uintptr_t))
+ *ldst++ ^= *lsrc++;
+
+ dst = (byte *)ldst;
+ src = (const byte *)lsrc;
+
+#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
+do_bytes:
+#endif
+ /* Handle tail. */
+ for (; len; len--)
+ *dst++ ^= *src++;
+}
+
+
/* Optimized function for buffer xoring with two destination buffers. Used
mainly by CFB mode encryption. */
static inline void
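
For reference, the semantics of the new helper reduce to a plain byte
loop; the following reference version (buf_xor_1_ref is hypothetical,
not part of the patch) is handy when testing the word-wise fast path:

    /* Reference semantics of buf_xor_1: dst[i] ^= src[i] for all
       i < len.  The optimized variant above merely adds a
       word-at-a-time fast path, taken when both pointers are word
       aligned, or unconditionally on platforms that define
       BUFHELP_FAST_UNALIGNED_ACCESS.  */
    static void
    buf_xor_1_ref (unsigned char *dst, const unsigned char *src,
                   size_t len)
    {
      while (len--)
        *dst++ ^= *src++;
    }
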
diff --git a/cipher/cipher-internal.h b/cipher/cipher-internal.h
index 650d8133..50b03243 100644
--- a/cipher/cipher-internal.h
+++ b/cipher/cipher-internal.h
@@ -26,6 +26,25 @@
/* The maximum supported size of a block in bytes. */
#define MAX_BLOCKSIZE 16
+/* The length for an OCB block. Although OCB supports any block
+ length it does not make sense to use a 64 bit blocklen (and cipher)
+ because this reduces the security margin to an unacceptable state.
+ Thus we require a cipher with 128 bit blocklength. */
+#define OCB_BLOCK_LEN (128/8)
+
+/* The size of the pre-computed L table for OCB. This takes the same
+ size as the table used for GCM and thus we don't save anything by
+ not using such a table. */
+#define OCB_L_TABLE_SIZE 16
+
+
+/* Check the above constants. */
+#if OCB_BLOCK_LEN > MAX_BLOCKSIZE
+# error OCB_BLOCK_LEN > MAX_BLOCKSIZE
+#endif
+
+
+
/* Magic values for the context structure. */
#define CTX_MAGIC_NORMAL 0x24091964
#define CTX_MAGIC_SECURE 0x46919042
@@ -119,19 +138,22 @@ struct gcry_cipher_handle
unsigned int key:1; /* Set to 1 if a key has been set. */
unsigned int iv:1; /* Set to 1 if a IV has been set. */
unsigned int tag:1; /* Set to 1 if a tag is finalized. */
+ unsigned int finalize:1; /* Next encrypt/decrypt has the final data. */
} marks;
/* The initialization vector. For best performance we make sure
that it is properly aligned. In particular some implementations
of bulk operations expect a 16 byte aligned IV. IV is also used
- to store CBC-MAC in CCM mode; counter IV is stored in U_CTR. */
+ to store CBC-MAC in CCM mode; counter IV is stored in U_CTR. For
+ OCB mode it is used for the offset value. */
union {
cipher_context_alignment_t iv_align;
unsigned char iv[MAX_BLOCKSIZE];
} u_iv;
/* The counter for CTR mode. This field is also used by AESWRAP and
- thus we can't use the U_IV union. */
+ thus we can't use the U_IV union. For OCB mode it is used for
+ the checksum. */
union {
cipher_context_alignment_t iv_align;
unsigned char ctr[MAX_BLOCKSIZE];
@@ -232,6 +254,40 @@ struct gcry_cipher_handle
#endif
#endif
} gcm;
+
+ /* Mode specific storage for OCB mode. */
+ struct {
+ /* Helper variables and pre-computed table of L values. */
+ unsigned char L_star[OCB_BLOCK_LEN];
+ unsigned char L_dollar[OCB_BLOCK_LEN];
+ unsigned char L[OCB_L_TABLE_SIZE][OCB_BLOCK_LEN];
+
+ /* The tag is valid if marks.tag has been set. */
+ unsigned char tag[OCB_BLOCK_LEN];
+
+ /* A buffer to hold the offset for the AAD processing. */
+ unsigned char aad_offset[OCB_BLOCK_LEN];
+
+ /* A buffer to hold the current sum of AAD processing. We can't
+ use tag here because tag may already hold the preprocessed
+ checksum of the data. */
+ unsigned char aad_sum[OCB_BLOCK_LEN];
+
+ /* Number of data/aad blocks processed so far. */
+ u64 data_nblocks;
+ u64 aad_nblocks;
+
+ /* Length of the tag. Fixed for now but may eventually be
+ specified using a set of gcry_cipher_flags. */
+ unsigned char taglen;
+
+ /* Flags indicating that the final data/aad block has been
+ processed. */
+ unsigned int data_finalized:1;
+ unsigned int aad_finalized:1;
+
+ } ocb;
+
} u_mode;
/* What follows are two contexts of the cipher in use. The first
@@ -363,4 +419,27 @@ gcry_err_code_t _gcry_cipher_poly1305_check_tag
void _gcry_cipher_poly1305_setkey
/* */ (gcry_cipher_hd_t c);
+
+/*-- cipher-ocb.c --*/
+gcry_err_code_t _gcry_cipher_ocb_encrypt
+/* */ (gcry_cipher_hd_t c,
+ unsigned char *outbuf, size_t outbuflen,
+ const unsigned char *inbuf, size_t inbuflen);
+gcry_err_code_t _gcry_cipher_ocb_decrypt
+/* */ (gcry_cipher_hd_t c,
+ unsigned char *outbuf, size_t outbuflen,
+ const unsigned char *inbuf, size_t inbuflen);
+gcry_err_code_t _gcry_cipher_ocb_set_nonce
+/* */ (gcry_cipher_hd_t c, const unsigned char *nonce,
+ size_t noncelen);
+gcry_err_code_t _gcry_cipher_ocb_authenticate
+/* */ (gcry_cipher_hd_t c, const unsigned char *abuf, size_t abuflen);
+gcry_err_code_t _gcry_cipher_ocb_get_tag
+/* */ (gcry_cipher_hd_t c,
+ unsigned char *outtag, size_t taglen);
+gcry_err_code_t _gcry_cipher_ocb_check_tag
+/* */ (gcry_cipher_hd_t c,
+ const unsigned char *intag, size_t taglen);
+
+
#endif /*G10_CIPHER_INTERNAL_H*/
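
The comment on OCB_L_TABLE_SIZE above can be made concrete: block i of
the data consumes the pre-computed value L_{ntz(i)}, so a 16-entry
table covers every block whose number is not a multiple of 2^16.  A
small standalone sketch of the access pattern (using the GCC/Clang
builtin __builtin_ctzll as a stand-in for libgcrypt's _gcry_ctz64):

    #include <stdio.h>

    int
    main (void)
    {
      unsigned long long i;

      /* Block i uses L_{ntz(i)}; only every 65536-th block needs an
         L value beyond the 16-entry table.  */
      for (i = 1; i <= 8; i++)
        printf ("block %llu uses L_%d\n", i, __builtin_ctzll (i));
      return 0;
    }
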
diff --git a/cipher/cipher-ocb.c b/cipher/cipher-ocb.c
new file mode 100644
index 00000000..25466f0d
--- /dev/null
+++ b/cipher/cipher-ocb.c
@@ -0,0 +1,495 @@
+/* cipher-ocb.c - OCB cipher mode
+ * Copyright (C) 2015 g10 Code GmbH
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * OCB is covered by several patents but may be used freely by most
+ * software. See http://web.cs.ucdavis.edu/~rogaway/ocb/license.htm .
+ * In particular license 1 is suitable for Libgcrypt: See
+ * http://web.cs.ucdavis.edu/~rogaway/ocb/license1.pdf for the full
+ * license document; it basically says:
+ *
+ * License 1 — License for Open-Source Software Implementations of OCB
+ * (Jan 9, 2013)
+ *
+ * Under this license, you are authorized to make, use, and
+ * distribute open-source software implementations of OCB. This
+ * license terminates for you if you sue someone over their
+ * open-source software implementation of OCB claiming that you have
+ * a patent covering their implementation.
+ */
+
+
+#include <config.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "g10lib.h"
+#include "cipher.h"
+#include "bufhelp.h"
+#include "./cipher-internal.h"
+
+
+/* Double the OCB_BLOCK_LEN sized block B in-place. */
+static inline void
+double_block (unsigned char *b)
+{
+#if OCB_BLOCK_LEN != 16
+ unsigned char b_0 = b[0];
+ int i;
+
+ for (i=0; i < OCB_BLOCK_LEN - 1; i++)
+ b[i] = (b[i] << 1) | (b[i+1] >> 7);
+
+ b[OCB_BLOCK_LEN-1] = (b[OCB_BLOCK_LEN-1] << 1) ^ ((b_0 >> 7) * 135);
+#else
+ /* This variant works on two 64 bit words for 16 byte blocks.
+ However it is not faster than the straight byte by byte
+ implementation above. */
+ u64 l_0, l, r;
+
+ l = buf_get_be64 (b);
+ r = buf_get_be64 (b + 8);
+
+ l_0 = (int64_t)l >> 63;
+ l = (l + l) ^ (r >> 63);
+ r = (r + r) ^ (l_0 & 135);
+
+ buf_put_be64 (b, l);
+ buf_put_be64 (b+8, r);
+#endif
+}
+
+
+/* Double the OCB_BLOCK_LEN sized block S and store it at D. D and S
+ may be identical but they may not otherwise overlap. */
+static void
+double_block_cpy (unsigned char *d, const unsigned char *s)
+{
+ if (d != s)
+ buf_cpy (d, s, OCB_BLOCK_LEN);
+ double_block (d);
+}
+
+
+/* Copy NBYTES from buffer S starting at bit offset BITOFF to buffer D. */
+static void
+bit_copy (unsigned char *d, const unsigned char *s,
+ unsigned int bitoff, unsigned int nbytes)
+{
+ unsigned int shift;
+
+ s += bitoff / 8;
+ shift = bitoff % 8;
+ if (shift)
+ {
+ for (; nbytes; nbytes--, d++, s++)
+ *d = (s[0] << shift) | (s[1] >> (8 - shift));
+ }
+ else
+ {
+ for (; nbytes; nbytes--, d++, s++)
+ *d = *s;
+ }
+}
+
+
+/* Return the L-value for block N. In most cases we use the table;
+ only if the lower OCB_L_TABLE_SIZE bits of N are zero we need to
+ compute it. With a table size of 16 we need to do this only
+ every 65536-th block. L_TMP is a helper buffer of size
+ OCB_BLOCK_LEN which is used to hold the computation if not taken
+ from the table. */
+static const unsigned char *
+get_l (gcry_cipher_hd_t c, unsigned char *l_tmp, u64 n)
+{
+ int ntz = _gcry_ctz64 (n);
+
+ if (ntz < OCB_L_TABLE_SIZE)
+ return c->u_mode.ocb.L[ntz];
+
+ double_block_cpy (l_tmp, c->u_mode.ocb.L[OCB_L_TABLE_SIZE - 1]);
+ for (ntz -= OCB_L_TABLE_SIZE; ntz; ntz--)
+ double_block (l_tmp);
+
+ return l_tmp;
+}
+
+
+/* Set the nonce for OCB. This requires that the key has been set.
+ Using it again starts a new encryption cycle using the same
+ key. */
+gcry_err_code_t
+_gcry_cipher_ocb_set_nonce (gcry_cipher_hd_t c, const unsigned char *nonce,
+ size_t noncelen)
+{
+ unsigned char ktop[OCB_BLOCK_LEN];
+ unsigned char stretch[OCB_BLOCK_LEN + 8];
+ unsigned int bottom;
+ int i;
+ unsigned int burn = 0;
+ unsigned int nburn;
+
+ /* Check args. */
+ if (!c->marks.key)
+ return GPG_ERR_INV_STATE; /* Key must have been set first. */
+ switch (c->u_mode.ocb.taglen)
+ {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return GPG_ERR_BUG; /* Invalid tag length. */
+ }
+
+ if (c->spec->blocksize != OCB_BLOCK_LEN)
+ return GPG_ERR_CIPHER_ALGO;
+ if (!nonce)
+ return GPG_ERR_INV_ARG;
+ /* 120 bit is the allowed maximum. In addition we impose a minimum
+ of 64 bit. */
+ if (noncelen > (120/8) || noncelen < (64/8) || noncelen >= OCB_BLOCK_LEN)
+ return GPG_ERR_INV_LENGTH;
+
+ /* Set up the L table. */
+ /* L_star = E(zero_128) */
+ memset (ktop, 0, OCB_BLOCK_LEN);
+ nburn = c->spec->encrypt (&c->context.c, c->u_mode.ocb.L_star, ktop);
+ burn = nburn > burn ? nburn : burn;
+ /* L_dollar = double(L_star) */
+ double_block_cpy (c->u_mode.ocb.L_dollar, c->u_mode.ocb.L_star);
+ /* L_0 = double(L_dollar), ... */
+ double_block_cpy (c->u_mode.ocb.L[0], c->u_mode.ocb.L_dollar);
+ for (i = 1; i < OCB_L_TABLE_SIZE; i++)
+ double_block_cpy (c->u_mode.ocb.L[i], c->u_mode.ocb.L[i-1]);
+
+ /* Prepare the nonce. */
+ memset (ktop, 0, (OCB_BLOCK_LEN - noncelen));
+ buf_cpy (ktop + (OCB_BLOCK_LEN - noncelen), nonce, noncelen);
+ ktop[0] = ((c->u_mode.ocb.taglen * 8) % 128) << 1;
+ ktop[OCB_BLOCK_LEN - noncelen - 1] |= 1;
+ bottom = ktop[OCB_BLOCK_LEN - 1] & 0x3f;
+ ktop[OCB_BLOCK_LEN - 1] &= 0xc0; /* Zero the bottom bits. */
+ nburn = c->spec->encrypt (&c->context.c, ktop, ktop);
+ burn = nburn > burn ? nburn : burn;
+ /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
+ buf_cpy (stretch, ktop, OCB_BLOCK_LEN);
+ buf_xor (stretch + OCB_BLOCK_LEN, ktop, ktop + 1, 8);
+ /* Offset_0 = Stretch[1+bottom..128+bottom]
+ (We use the IV field to store the offset) */
+ bit_copy (c->u_iv.iv, stretch, bottom, OCB_BLOCK_LEN);
+ c->marks.iv = 1;
+
+ /* Checksum_0 = zeros(128)
+ (We use the CTR field to store the checksum) */
+ memset (c->u_ctr.ctr, 0, OCB_BLOCK_LEN);
+
+ /* Clear AAD buffer. */
+ memset (c->u_mode.ocb.aad_offset, 0, OCB_BLOCK_LEN);
+ memset (c->u_mode.ocb.aad_sum, 0, OCB_BLOCK_LEN);
+
+ /* Setup other values. */
+ memset (c->lastiv, 0, sizeof(c->lastiv));
+ c->unused = 0;
+ c->marks.tag = 0;
+ c->marks.finalize = 0;
+ c->u_mode.ocb.data_nblocks = 0;
+ c->u_mode.ocb.aad_nblocks = 0;
+ c->u_mode.ocb.data_finalized = 0;
+ c->u_mode.ocb.aad_finalized = 0;
+
+ /* log_printhex ("L_* ", c->u_mode.ocb.L_star, OCB_BLOCK_LEN); */
+ /* log_printhex ("L_$ ", c->u_mode.ocb.L_dollar, OCB_BLOCK_LEN); */
+ /* log_printhex ("L_0 ", c->u_mode.ocb.L[0], OCB_BLOCK_LEN); */
+ /* log_printhex ("L_1 ", c->u_mode.ocb.L[1], OCB_BLOCK_LEN); */
+ /* log_debug ( "bottom : %u (decimal)\n", bottom); */
+ /* log_printhex ("Ktop ", ktop, OCB_BLOCK_LEN); */
+ /* log_printhex ("Stretch ", stretch, sizeof stretch); */
+ /* log_printhex ("Offset_0 ", c->u_iv.iv, OCB_BLOCK_LEN); */
+
+ /* Cleanup */
+ wipememory (ktop, sizeof ktop);
+ wipememory (stretch, sizeof stretch);
+ if (burn > 0)
+ _gcry_burn_stack (burn + 4*sizeof(void*));
+
+ return 0;
+}
+
+
+/* Process additional authentication data. This implementation allows
+ adding additional authentication data at any time before the final
+ gcry_cipher_gettag. The size of the data provided in
+ (ABUF,ABUFLEN) must be a multiple of the blocksize. If a
+ non-multiple of the blocksize is used no further data may be passed
+ to this function. */
+gcry_err_code_t
+_gcry_cipher_ocb_authenticate (gcry_cipher_hd_t c, const unsigned char *abuf,
+ size_t abuflen)
+{
+ unsigned char l_tmp[OCB_BLOCK_LEN];
+
+ /* Check that a nonce and thus a key has been set and that we have
+ not yet computed the tag. We also return an error if the aad has
+ been finalized (i.e. a short block has been processed). */
+ if (!c->marks.iv || c->marks.tag || c->u_mode.ocb.aad_finalized)
+ return GPG_ERR_INV_STATE;
+
+ /* Check correct usage and arguments. */
+ if (c->spec->blocksize != OCB_BLOCK_LEN)
+ return GPG_ERR_CIPHER_ALGO;
+ if (!abuflen)
+ return 0;
+
+ /* Hash all full blocks. */
+ while (abuflen >= OCB_BLOCK_LEN)
+ {
+ c->u_mode.ocb.aad_nblocks++;
+
+ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
+ buf_xor_1 (c->u_mode.ocb.aad_offset,
+ get_l (c, l_tmp, c->u_mode.ocb.aad_nblocks), OCB_BLOCK_LEN);
+ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
+ buf_xor (l_tmp, c->u_mode.ocb.aad_offset, abuf, OCB_BLOCK_LEN);
+ c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
+ buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
+
+ abuf += OCB_BLOCK_LEN;
+ abuflen -= OCB_BLOCK_LEN;
+ }
+
+ /* Hash final partial block. Note that we expect ABUFLEN to be
+ shorter than OCB_BLOCK_LEN. */
+ if (abuflen)
+ {
+ /* Offset_* = Offset_m xor L_* */
+ buf_xor_1 (c->u_mode.ocb.aad_offset,
+ c->u_mode.ocb.L_star, OCB_BLOCK_LEN);
+ /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_* */
+ buf_cpy (l_tmp, abuf, abuflen);
+ memset (l_tmp + abuflen, 0, OCB_BLOCK_LEN - abuflen);
+ l_tmp[abuflen] = 0x80;
+ buf_xor_1 (l_tmp, c->u_mode.ocb.aad_offset, OCB_BLOCK_LEN);
+ /* Sum = Sum_m xor ENCIPHER(K, CipherInput) */
+ c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
+ buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
+
+ /* Mark AAD as finalized to avoid accidentally calling this
+ function again after a non-full block has been processed. */
+ c->u_mode.ocb.aad_finalized = 1;
+ }
+
+ return 0;
+}
+
+
+/* Common code for encrypt and decrypt. */
+static gcry_err_code_t
+ocb_crypt (gcry_cipher_hd_t c, int encrypt,
+ unsigned char *outbuf, size_t outbuflen,
+ const unsigned char *inbuf, size_t inbuflen)
+{
+ unsigned char l_tmp[OCB_BLOCK_LEN];
+ unsigned int burn = 0;
+ unsigned int nburn;
+
+ /* Check that a nonce and thus a key has been set and that we are
+ not yet in end of data state. */
+ if (!c->marks.iv || c->u_mode.ocb.data_finalized)
+ return GPG_ERR_INV_STATE;
+
+ /* Check correct usage and arguments. */
+ if (c->spec->blocksize != OCB_BLOCK_LEN)
+ return GPG_ERR_CIPHER_ALGO;
+ if (outbuflen < inbuflen)
+ return GPG_ERR_BUFFER_TOO_SHORT;
+ if (c->marks.finalize)
+ ; /* Allow arbitrary length. */
+ else if ((inbuflen % OCB_BLOCK_LEN))
+ return GPG_ERR_INV_LENGTH; /* We support only full blocks for now. */
+
+ /* Encrypt all full blocks. */
+ while (inbuflen >= OCB_BLOCK_LEN)
+ {
+ c->u_mode.ocb.data_nblocks++;
+
+ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
+ buf_xor_1 (c->u_iv.iv,
+ get_l (c, l_tmp, c->u_mode.ocb.data_nblocks), OCB_BLOCK_LEN);
+ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
+ buf_xor (outbuf, c->u_iv.iv, inbuf, OCB_BLOCK_LEN);
+ if (encrypt)
+ nburn = c->spec->encrypt (&c->context.c, outbuf, outbuf);
+ else
+ nburn = c->spec->decrypt (&c->context.c, outbuf, outbuf);
+ burn = nburn > burn ? nburn : burn;
+ buf_xor_1 (outbuf, c->u_iv.iv, OCB_BLOCK_LEN);
+
+ /* Checksum_i = Checksum_{i-1} xor P_i */
+ buf_xor_1 (c->u_ctr.ctr, encrypt? inbuf : outbuf, OCB_BLOCK_LEN);
+
+ inbuf += OCB_BLOCK_LEN;
+ inbuflen -= OCB_BLOCK_LEN;
+ outbuf += OCB_BLOCK_LEN;
+ outbuflen -= OCB_BLOCK_LEN;
+ }
+
+ /* Encrypt final partial block. Note that we expect INBUFLEN to be
+ shorter than OCB_BLOCK_LEN (see above). */
+ if (inbuflen)
+ {
+ unsigned char pad[OCB_BLOCK_LEN];
+
+ /* Offset_* = Offset_m xor L_* */
+ buf_xor_1 (c->u_iv.iv, c->u_mode.ocb.L_star, OCB_BLOCK_LEN);
+ /* Pad = ENCIPHER(K, Offset_*) */
+ nburn = c->spec->encrypt (&c->context.c, pad, c->u_iv.iv);
+ burn = nburn > burn ? nburn : burn;
+
+ if (encrypt)
+ {
+ /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
+ /* Note that INBUFLEN is less than OCB_BLOCK_LEN. */
+ buf_cpy (l_tmp, inbuf, inbuflen);
+ memset (l_tmp + inbuflen, 0, OCB_BLOCK_LEN - inbuflen);
+ l_tmp[inbuflen] = 0x80;
+ buf_xor_1 (c->u_ctr.ctr, l_tmp, OCB_BLOCK_LEN);
+ /* C_* = P_* xor Pad[1..bitlen(P_*)] */
+ buf_xor (outbuf, inbuf, pad, inbuflen);
+ }
+ else
+ {
+ /* P_* = C_* xor Pad[1..bitlen(C_*)] */
+ /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
+ buf_cpy (l_tmp, pad, OCB_BLOCK_LEN);
+ buf_cpy (l_tmp, inbuf, inbuflen);
+ buf_xor_1 (l_tmp, pad, OCB_BLOCK_LEN);
+ l_tmp[inbuflen] = 0x80;
+ buf_cpy (outbuf, l_tmp, inbuflen);
+
+ buf_xor_1 (c->u_ctr.ctr, l_tmp, OCB_BLOCK_LEN);
+ }
+ }
+
+ /* Compute the tag if the finalize flag has been set. */
+ if (c->marks.finalize)
+ {
+ /* Tag = ENCIPHER(K, Checksum xor Offset xor L_$) xor HASH(K,A) */
+ buf_xor (c->u_mode.ocb.tag, c->u_ctr.ctr, c->u_iv.iv, OCB_BLOCK_LEN);
+ buf_xor_1 (c->u_mode.ocb.tag, c->u_mode.ocb.L_dollar, OCB_BLOCK_LEN);
+ nburn = c->spec->encrypt (&c->context.c,
+ c->u_mode.ocb.tag, c->u_mode.ocb.tag);
+ burn = nburn > burn ? nburn : burn;
+
+ c->u_mode.ocb.data_finalized = 1;
+ /* Note that the final part of the tag computation is done
+ by _gcry_cipher_ocb_get_tag. */
+ }
+
+ if (burn > 0)
+ _gcry_burn_stack (burn + 4*sizeof(void*));
+
+ return 0;
+}
+
+
+/* Encrypt (INBUF,INBUFLEN) in OCB mode to OUTBUF. OUTBUFLEN gives
+ the allocated size of OUTBUF. This function accepts only multiples
+ of a full block unless gcry_cipher_final has been called in which
+ case the next block may have any length. */
+gcry_err_code_t
+_gcry_cipher_ocb_encrypt (gcry_cipher_hd_t c,
+ unsigned char *outbuf, size_t outbuflen,
+ const unsigned char *inbuf, size_t inbuflen)
+{
+ return ocb_crypt (c, 1, outbuf, outbuflen, inbuf, inbuflen);
+}
+
+
+/* Decrypt (INBUF,INBUFLEN) in OCB mode to OUTBUF. OUTBUFLEN gives
+ the allocated size of OUTBUF. This function accepts only multiples
+ of a full block unless gcry_cipher_final has been called in which
+ case the next block may have any length. */
+gcry_err_code_t
+_gcry_cipher_ocb_decrypt (gcry_cipher_hd_t c,
+ unsigned char *outbuf, size_t outbuflen,
+ const unsigned char *inbuf, size_t inbuflen)
+{
+ return ocb_crypt (c, 0, outbuf, outbuflen, inbuf, inbuflen);
+}
+
+
+/* Compute the tag. The last data operation has already done some
+ part of it. To allow adding AAD even after having done all data,
+ we finish the tag computation only here. */
+static void
+compute_tag_if_needed (gcry_cipher_hd_t c)
+{
+ if (!c->marks.tag)
+ {
+ buf_xor_1 (c->u_mode.ocb.tag, c->u_mode.ocb.aad_sum, OCB_BLOCK_LEN);
+ c->marks.tag = 1;
+ }
+}
+
+
+/* Copy the already computed tag to OUTTAG. OUTTAGSIZE is the
+ allocated size of OUTTAG; the function returns an error if that is
+ too short to hold the tag. */
+gcry_err_code_t
+_gcry_cipher_ocb_get_tag (gcry_cipher_hd_t c,
+ unsigned char *outtag, size_t outtagsize)
+{
+ if (c->u_mode.ocb.taglen > outtagsize)
+ return GPG_ERR_BUFFER_TOO_SHORT;
+ if (!c->u_mode.ocb.data_finalized)
+ return GPG_ERR_INV_STATE; /* Data has not yet been finalized. */
+
+ compute_tag_if_needed (c);
+
+ memcpy (outtag, c->u_mode.ocb.tag, c->u_mode.ocb.taglen);
+
+ return 0;
+}
+
+
+/* Check that the tag (INTAG,TAGLEN) matches the computed tag for the
+ handle C. */
+gcry_err_code_t
+_gcry_cipher_ocb_check_tag (gcry_cipher_hd_t c, const unsigned char *intag,
+ size_t taglen)
+{
+ size_t n;
+
+ if (!c->u_mode.ocb.data_finalized)
+ return GPG_ERR_INV_STATE; /* Data has not yet been finalized. */
+
+ compute_tag_if_needed (c);
+
+ n = c->u_mode.ocb.taglen;
+ if (taglen < n)
+ n = taglen;
+
+ if (!buf_eq_const (intag, c->u_mode.ocb.tag, n)
+ || c->u_mode.ocb.taglen != taglen)
+ return GPG_ERR_CHECKSUM;
+
+ return 0;
+}
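
The double_block primitive above is multiplication by x in GF(2^128)
modulo the polynomial x^128 + x^7 + x^2 + x + 1; the low terms
x^7 + x^2 + x + 1 encode as the reduction constant 135 (0x87).  A
standalone sketch of the same math (gf128_double is a hypothetical
rewrite of double_block, with an input chosen so that the reduction
step fires):

    #include <stdio.h>

    /* double(B) = B << 1 if the top bit of B is clear, else
       (B << 1) ^ 0x87.  In OCB the chain is L_* = E_K(0^128),
       L_$ = double(L_*), L_0 = double(L_$), L_{i+1} = double(L_i). */
    static void
    gf128_double (unsigned char b[16])
    {
      unsigned char carry = b[0] >> 7;  /* top bit decides reduction */
      int i;

      for (i = 0; i < 15; i++)
        b[i] = (b[i] << 1) | (b[i+1] >> 7);
      b[15] = (b[15] << 1) ^ (carry * 135);
    }

    int
    main (void)
    {
      /* Hypothetical input with only the top bit set.  */
      unsigned char l[16] = { 0x80, 0 };
      int i;

      gf128_double (l);
      for (i = 0; i < 16; i++)
        printf ("%02x", l[i]);   /* expect fifteen 00 bytes then 87 */
      putchar ('\n');
      return 0;
    }
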
diff --git a/cipher/cipher.c b/cipher/cipher.c
index 78cad210..0a13fe61 100644
--- a/cipher/cipher.c
+++ b/cipher/cipher.c
@@ -425,6 +425,17 @@ _gcry_cipher_open_internal (gcry_cipher_hd_t *handle,
err = GPG_ERR_INV_CIPHER_MODE;
break;
+ case GCRY_CIPHER_MODE_OCB:
+ /* Note that our implementation allows only for 128 bit block
+ length algorithms. Lower block lengths would be possible
+ but we do not implement them because they limit the
+ security too much. */
+ if (!spec->encrypt || !spec->decrypt)
+ err = GPG_ERR_INV_CIPHER_MODE;
+ else if (spec->blocksize != (128/8))
+ err = GPG_ERR_INV_CIPHER_MODE;
+ break;
+
case GCRY_CIPHER_MODE_STREAM:
if (!spec->stencrypt || !spec->stdecrypt)
err = GPG_ERR_INV_CIPHER_MODE;
@@ -445,7 +456,8 @@ _gcry_cipher_open_internal (gcry_cipher_hd_t *handle,
/* Perform selftest here and mark this with a flag in cipher_table?
No, we should not do this as it takes too long. Further it does
not make sense to exclude algorithms with failing selftests at
- runtime: If a selftest fails there is something seriously wrong with the system and thus we better die immediately. */
+ runtime: If a selftest fails there is something seriously wrong
+ with the system and thus we better die immediately. */
if (! err)
{
@@ -551,6 +563,18 @@ _gcry_cipher_open_internal (gcry_cipher_hd_t *handle,
default:
break;
}
+
+ /* Setup defaults depending on the mode. */
+ switch (mode)
+ {
+ case GCRY_CIPHER_MODE_OCB:
+ h->u_mode.ocb.taglen = 16; /* Bytes. */
+ break;
+
+ default:
+ break;
+ }
+
}
}
@@ -716,6 +740,10 @@ cipher_reset (gcry_cipher_hd_t c)
break;
#endif
+ case GCRY_CIPHER_MODE_OCB:
+ memset (&c->u_mode.ocb, 0, sizeof c->u_mode.ocb);
+ break;
+
default:
break; /* u_mode unused by other modes. */
}
@@ -827,6 +855,10 @@ cipher_encrypt (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen,
inbuf, inbuflen);
break;
+ case GCRY_CIPHER_MODE_OCB:
+ rc = _gcry_cipher_ocb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+ break;
+
case GCRY_CIPHER_MODE_STREAM:
c->spec->stencrypt (&c->context.c,
outbuf, (byte*)/*arggg*/inbuf, inbuflen);
@@ -940,6 +972,10 @@ cipher_decrypt (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen,
inbuf, inbuflen);
break;
+ case GCRY_CIPHER_MODE_OCB:
+ rc = _gcry_cipher_ocb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+ break;
+
case GCRY_CIPHER_MODE_STREAM:
c->spec->stdecrypt (&c->context.c,
outbuf, (byte*)/*arggg*/inbuf, inbuflen);
@@ -1029,6 +1065,10 @@ _gcry_cipher_setiv (gcry_cipher_hd_t hd, const void *iv, size_t ivlen)
rc = _gcry_cipher_poly1305_setiv (hd, iv, ivlen);
break;
+ case GCRY_CIPHER_MODE_OCB:
+ rc = _gcry_cipher_ocb_set_nonce (hd, iv, ivlen);
+ break;
+
default:
rc = cipher_setiv (hd, iv, ivlen);
break;
@@ -1083,6 +1123,10 @@ _gcry_cipher_authenticate (gcry_cipher_hd_t hd, const void *abuf,
rc = _gcry_cipher_poly1305_authenticate (hd, abuf, abuflen);
break;
+ case GCRY_CIPHER_MODE_OCB:
+ rc = _gcry_cipher_ocb_authenticate (hd, abuf, abuflen);
+ break;
+
default:
log_error ("gcry_cipher_authenticate: invalid mode %d\n", hd->mode);
rc = GPG_ERR_INV_CIPHER_MODE;
@@ -1116,6 +1160,10 @@ _gcry_cipher_gettag (gcry_cipher_hd_t hd, void *outtag, size_t taglen)
rc = _gcry_cipher_poly1305_get_tag (hd, outtag, taglen);
break;
+ case GCRY_CIPHER_MODE_OCB:
+ rc = _gcry_cipher_ocb_get_tag (hd, outtag, taglen);
+ break;
+
default:
log_error ("gcry_cipher_gettag: invalid mode %d\n", hd->mode);
rc = GPG_ERR_INV_CIPHER_MODE;
@@ -1149,6 +1197,10 @@ _gcry_cipher_checktag (gcry_cipher_hd_t hd, const void *intag, size_t taglen)
rc = _gcry_cipher_poly1305_check_tag (hd, intag, taglen);
break;
+ case GCRY_CIPHER_MODE_OCB:
+ rc = _gcry_cipher_ocb_check_tag (hd, intag, taglen);
+ break;
+
default:
log_error ("gcry_cipher_checktag: invalid mode %d\n", hd->mode);
rc = GPG_ERR_INV_CIPHER_MODE;
@@ -1170,6 +1222,12 @@ _gcry_cipher_ctl (gcry_cipher_hd_t h, int cmd, void *buffer, size_t buflen)
cipher_reset (h);
break;
+ case GCRYCTL_FINALIZE:
+ if (!h || buffer || buflen)
+ return GPG_ERR_INV_ARG;
+ h->marks.finalize = 1;
+ break;
+
case GCRYCTL_CFB_SYNC:
cipher_sync( h );
break;
@@ -1222,6 +1280,29 @@ _gcry_cipher_ctl (gcry_cipher_hd_t h, int cmd, void *buffer, size_t buflen)
#endif
break;
+ case GCRYCTL_SET_TAGLEN:
+ if (!h || !buffer || buflen != sizeof(int))
+ return GPG_ERR_INV_ARG;
+ switch (h->mode)
+ {
+ case GCRY_CIPHER_MODE_OCB:
+ switch (*(int*)buffer)
+ {
+ case 8: case 12: case 16:
+ h->u_mode.ocb.taglen = *(int*)buffer;
+ break;
+ default:
+ rc = GPG_ERR_INV_LENGTH; /* Invalid tag length. */
+ break;
+ }
+ break;
+
+ default:
+ rc = GPG_ERR_INV_CIPHER_MODE;
+ break;
+ }
+ break;
+
case GCRYCTL_DISABLE_ALGO:
/* This command expects NULL for H and BUFFER to point to an
integer with the algo number. */
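
A caller selects a truncated tag through the new control code; a
sketch follows (set_ocb_taglen is a hypothetical wrapper, not part of
the patch).  Note that it must run before gcry_cipher_setiv, because
_gcry_cipher_ocb_set_nonce encodes the tag length into the
nonce-derived Ktop block:

    #include <gcrypt.h>

    /* Hypothetical wrapper: request a truncated OCB tag.  Valid
       lengths are 8, 12 and 16 bytes; anything else is rejected
       with GPG_ERR_INV_LENGTH.  Call before gcry_cipher_setiv.  */
    static gcry_error_t
    set_ocb_taglen (gcry_cipher_hd_t hd, int taglen)
    {
      return gcry_cipher_ctl (hd, GCRYCTL_SET_TAGLEN,
                              &taglen, sizeof taglen);
    }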