author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 09:38:37 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 09:38:37 -0700
commit    332a3392188e0ad966543c87b8da2b9d246f301d (patch)
tree      ac0d570590bffdd1924426adc5b255857d2f3297 /crypto
parent    a9c86d42599519f3d83b5f46bdab25046fe47b84 (diff)
parent    81bd5f6c966cf2f137c2759dfc78abdffcff055e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (102 commits)
  crypto: sha-s390 - Fix warnings in import function
  crypto: vmac - New hash algorithm for intel_txt support
  crypto: api - Do not displace newly registered algorithms
  crypto: ansi_cprng - Fix module initialization
  crypto: xcbc - Fix alignment calculation of xcbc_tfm_ctx
  crypto: fips - Depend on ansi_cprng
  crypto: blkcipher - Do not use eseqiv on stream ciphers
  crypto: ctr - Use chainiv on raw counter mode
  Revert crypto: fips - Select CPRNG
  crypto: rng - Fix typo
  crypto: talitos - add support for 36 bit addressing
  crypto: talitos - align locks on cache lines
  crypto: talitos - simplify hmac data size calculation
  crypto: mv_cesa - Add support for Orion5X crypto engine
  crypto: cryptd - Add support to access underlaying shash
  crypto: gcm - Use GHASH digest algorithm
  crypto: ghash - Add GHASH digest algorithm for GCM
  crypto: authenc - Convert to ahash
  crypto: api - Fix aligned ctx helper
  crypto: hmac - Prehash ipad/opad
  ...
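
The series reworks the hash layer around the ahash interface: crypto_alloc_ahash, crypto_ahash_setkey and crypto_ahash_digest are exported from crypto/ahash.c in the diff below. As a rough illustration only — this caller is not part of the merge, and the "hmac(sha256)" algorithm name and buffer handling are assumptions — a kernel user of the reworked interface might look like the following sketch. Buffers are assumed to be kmalloc'd (scatterlist-safe), and -EINPROGRESS completion handling for asynchronous backends is omitted.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical caller sketch, not part of this merge. */
static int example_ahash_digest(const u8 *key, unsigned int keylen,
				const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* data must be scatterlist-capable memory (e.g. kmalloc'd), not stack. */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	/* Synchronous use only; async implementations would return -EINPROGRESS. */
	err = crypto_ahash_digest(req);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}
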
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig           |  30
-rw-r--r--  crypto/Makefile          |   5
-rw-r--r--  crypto/ablkcipher.c      |  29
-rw-r--r--  crypto/aes_generic.c     |   9
-rw-r--r--  crypto/ahash.c           | 336
-rw-r--r--  crypto/algapi.c          | 180
-rw-r--r--  crypto/algboss.c         |   5
-rw-r--r--  crypto/ansi_cprng.c      |  43
-rw-r--r--  crypto/api.c             |  54
-rw-r--r--  crypto/authenc.c         | 358
-rw-r--r--  crypto/cryptd.c          | 321
-rw-r--r--  crypto/ctr.c             |   2
-rw-r--r--  crypto/gcm.c             | 580
-rw-r--r--  crypto/ghash-generic.c   | 170
-rw-r--r--  crypto/hmac.c            | 302
-rw-r--r--  crypto/internal.h        |  28
-rw-r--r--  crypto/pcompress.c       |   6
-rw-r--r--  crypto/rng.c             |   2
-rw-r--r--  crypto/sha1_generic.c    |  41
-rw-r--r--  crypto/sha256_generic.c  | 100
-rw-r--r--  crypto/sha512_generic.c  |  48
-rw-r--r--  crypto/shash.c           | 270
-rw-r--r--  crypto/tcrypt.c          |  22
-rw-r--r--  crypto/testmgr.c         |  30
-rw-r--r--  crypto/testmgr.h         |  16
-rw-r--r--  crypto/vmac.c            | 678
-rw-r--r--  crypto/xcbc.c            | 370
27 files changed, 2960 insertions, 1075 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 4dfdd03e708..26b5dd0cb56 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,11 +23,13 @@ comment "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
+ depends on CRYPTO_ANSI_CPRNG
help
This options enables the fips boot option which is
required if you want to system to operate in a FIPS 200
certification. You should say no unless you know what
- this is.
+ this is. Note that CRYPTO_ANSI_CPRNG is requred if this
+ option is selected
config CRYPTO_ALGAPI
tristate
@@ -156,7 +158,7 @@ config CRYPTO_GCM
tristate "GCM/GMAC support"
select CRYPTO_CTR
select CRYPTO_AEAD
- select CRYPTO_GF128MUL
+ select CRYPTO_GHASH
help
Support for Galois/Counter Mode (GCM) and Galois Message
Authentication Code (GMAC). Required for IPSec.
@@ -267,6 +269,18 @@ config CRYPTO_XCBC
http://csrc.nist.gov/encryption/modes/proposedmodes/
xcbc-mac/xcbc-mac-spec.pdf
+config CRYPTO_VMAC
+ tristate "VMAC support"
+ depends on EXPERIMENTAL
+ select CRYPTO_HASH
+ select CRYPTO_MANAGER
+ help
+ VMAC is a message authentication algorithm designed for
+ very high speed on 64-bit architectures.
+
+ See also:
+ <http://fastcrypto.org/vmac>
+
comment "Digest"
config CRYPTO_CRC32C
@@ -289,6 +303,13 @@ config CRYPTO_CRC32C_INTEL
gain performance compared with software implementation.
Module will be crc32c-intel.
+config CRYPTO_GHASH
+ tristate "GHASH digest algorithm"
+ select CRYPTO_SHASH
+ select CRYPTO_GF128MUL
+ help
+ GHASH is message digest algorithm for GCM (Galois/Counter Mode).
+
config CRYPTO_MD4
tristate "MD4 digest algorithm"
select CRYPTO_HASH
@@ -780,13 +801,14 @@ comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
tristate "Pseudo Random Number Generation for Cryptographic modules"
+ default m
select CRYPTO_AES
select CRYPTO_RNG
- select CRYPTO_FIPS
help
This option enables the generic pseudo random number generator
for cryptographic modules. Uses the Algorithm specified in
- ANSI X9.31 A.2.4
+ ANSI X9.31 A.2.4. Not this option must be enabled if CRYPTO_FIPS
+ is selected
source "drivers/crypto/Kconfig"
diff --git a/crypto/Makefile b/crypto/Makefile
index 673d9f7c1bd..9e8f61908cb 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-objs := api.o cipher.o digest.o compress.o
+crypto-objs := api.o cipher.o compress.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
@@ -22,7 +22,6 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
-crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
crypto_hash-objs += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
@@ -33,6 +32,7 @@ cryptomgr-objs := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
+obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
#
# generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e11ce37c710..f6f08336df5 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -14,6 +14,7 @@
*/
#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -25,6 +26,8 @@
#include "internal.h"
+static const char *skcipher_default_geniv __read_mostly;
+
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
@@ -180,7 +183,14 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type);
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
- return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
+ if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+ alg->cra_ablkcipher.ivsize) !=
+ alg->cra_blocksize)
+ return "chainiv";
+
+ return alg->cra_flags & CRYPTO_ALG_ASYNC ?
+ "eseqiv" : skcipher_default_geniv;
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -201,8 +211,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
int err;
larval = crypto_larval_lookup(alg->cra_driver_name,
+ (type & ~CRYPTO_ALG_TYPE_MASK) |
CRYPTO_ALG_TYPE_GIVCIPHER,
- CRYPTO_ALG_TYPE_MASK);
+ mask | CRYPTO_ALG_TYPE_MASK);
err = PTR_ERR(larval);
if (IS_ERR(larval))
goto out;
@@ -360,3 +371,17 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
+
+static int __init skcipher_module_init(void)
+{
+ skcipher_default_geniv = num_possible_cpus() > 1 ?
+ "eseqiv" : "chainiv";
+ return 0;
+}
+
+static void skcipher_module_exit(void)
+{
+}
+
+module_init(skcipher_module_init);
+module_exit(skcipher_module_exit);
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index b8b66ec3883..e78b7ee44a7 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
ctx->key_enc[6 * i + 11] = t; \
} while (0)
-#define loop8(i) do { \
+#define loop8tophalf(i) do { \
t = ror32(t, 8); \
t = ls_box(t) ^ rco_tab[i]; \
t ^= ctx->key_enc[8 * i]; \
@@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
ctx->key_enc[8 * i + 10] = t; \
t ^= ctx->key_enc[8 * i + 3]; \
ctx->key_enc[8 * i + 11] = t; \
+} while (0)
+
+#define loop8(i) do { \
+ loop8tophalf(i); \
t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \
ctx->key_enc[8 * i + 12] = t; \
t ^= ctx->key_enc[8 * i + 5]; \
@@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
ctx->key_enc[5] = le32_to_cpu(key[5]);
ctx->key_enc[6] = le32_to_cpu(key[6]);
t = ctx->key_enc[7] = le32_to_cpu(key[7]);
- for (i = 0; i < 7; ++i)
+ for (i = 0; i < 6; ++i)
loop8(i);
+ loop8tophalf(i);
break;
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f3476374f76..33a4ff45f84 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -24,6 +24,19 @@
#include "internal.h"
+struct ahash_request_priv {
+ crypto_completion_t complete;
+ void *data;
+ u8 *result;
+ void *ubuf[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
@@ -132,36 +145,34 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_ATOMIC);
+ buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
- ret = ahash->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ ret = tfm->setkey(tfm, alignbuffer, keylen);
+ kzfree(buffer);
return ret;
}
-static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)key & alignmask)
return ahash_setkey_unaligned(tfm, key, keylen);
- return ahash->setkey(tfm, key, keylen);
+ return tfm->setkey(tfm, key, keylen);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
@@ -169,44 +180,221 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
return -ENOSYS;
}
-int crypto_ahash_import(struct ahash_request *req, const u8 *in)
+static inline unsigned int ahash_align_buffer_size(unsigned len,
+ unsigned long mask)
+{
+ return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
+}
+
+static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+{
+ struct ahash_request_priv *priv = req->priv;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
+ kzfree(priv);
+}
+
+static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ ahash_op_unaligned_finish(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_op_unaligned(struct ahash_request *req,
+ int (*op)(struct ahash_request *))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ struct ahash_request_priv *priv;
+ int err;
+
+ priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
- memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm));
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
- if (alg->reinit)
- alg->reinit(req);
+ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+ req->base.complete = ahash_op_unaligned_done;
+ req->base.data = req;
+ req->priv = priv;
- return 0;
+ err = op(req);
+ ahash_op_unaligned_finish(req, err);
+
+ return err;
}
-EXPORT_SYMBOL_GPL(crypto_ahash_import);
-static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
+static int crypto_ahash_op(struct ahash_request *req,
+ int (*op)(struct ahash_request *))
{
- return alg->cra_ctxsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+
+ if ((unsigned long)req->result & alignmask)
+ return ahash_op_unaligned(req, op);
+
+ return op(req);
}
-static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+int crypto_ahash_final(struct ahash_request *req)
{
- struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
- struct ahash_tfm *crt = &tfm->crt_ahash;
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_final);
- if (alg->digestsize > PAGE_SIZE / 8)
- return -EINVAL;
+int crypto_ahash_finup(struct ahash_request *req)
+{
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_finup);
+
+int crypto_ahash_digest(struct ahash_request *req)
+{
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_digest);
+
+static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+{
+ struct ahash_request_priv *priv = req->priv;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
- crt->init = alg->init;
- crt->update = alg->update;
- crt->final = alg->final;
- crt->digest = alg->digest;
- crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey;
- crt->digestsize = alg->digestsize;
+ kzfree(priv);
+}
+
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ ahash_def_finup_finish2(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_def_finup_finish1(struct ahash_request *req, int err)
+{
+ if (err)
+ goto out;
+
+ req->base.complete = ahash_def_finup_done2;
+ req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_ahash_reqtfm(req)->final(req);
+
+out:
+ ahash_def_finup_finish2(req, err);
+ return err;
+}
+
+static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ err = ahash_def_finup_finish1(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_def_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ struct ahash_request_priv *priv;
+
+ priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
+
+ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+ req->base.complete = ahash_def_finup_done1;
+ req->base.data = req;
+ req->priv = priv;
+
+ return ahash_def_finup_finish1(req, tfm->update(req));
+}
+
+static int ahash_no_export(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
+static int ahash_no_import(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(hash);
+
+ hash->setkey = ahash_nosetkey;
+ hash->export = ahash_no_export;
+ hash->import = ahash_no_import;
+
+ if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+ return crypto_init_shash_ops_async(tfm);
+
+ hash->init = alg->init;
+ hash->update = alg->update;
+ hash->final = alg->final;
+ hash->finup = alg->finup ?: ahash_def_finup;
+ hash->digest = alg->digest;
+
+ if (alg->setkey)
+ hash->setkey = alg->setkey;
+ if (alg->export)
+ hash->export = alg->export;
+ if (alg->import)
+ hash->import = alg->import;
return 0;
}
+static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
+{
+ if (alg->cra_type == &crypto_ahash_type)
+ return alg->cra_ctxsize;
+
+ return sizeof(struct crypto_shash *);
+}
+
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
@@ -215,17 +403,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize);
+ seq_printf(m, "digestsize : %u\n",
+ __crypto_hash_alg_common(alg)->digestsize);
}
const struct crypto_type crypto_ahash_type = {
- .ctxsize = crypto_ahash_ctxsize,
- .init = crypto_init_ahash_ops,
+ .extsize = crypto_ahash_extsize,
+ .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
+struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
+
+static int ahash_prepare_alg(struct ahash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+
+ if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+ alg->halg.statesize > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ base->cra_type = &crypto_ahash_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
+
+ return 0;
+}
+
+int crypto_register_ahash(struct ahash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+ int err;
+
+ err = ahash_prepare_alg(alg);
+ if (err)
+ return err;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_ahash);
+
+int crypto_unregister_ahash(struct ahash_alg *alg)
+{
+ return crypto_unregister_alg(&alg->halg.base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
+
+int ahash_register_instance(struct crypto_template *tmpl,
+ struct ahash_instance *inst)
+{
+ int err;
+
+ err = ahash_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(ahash_register_instance);
+
+void ahash_free_instance(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(ahash_instance(inst));
+}
+EXPORT_SYMBOL_GPL(ahash_free_instance);
+
+int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+ struct hash_alg_common *alg,
+ struct crypto_instance *inst)
+{
+ return crypto_init_spawn2(&spawn->base, &alg->base, inst,
+ &crypto_ahash_type);
+}
+EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
+
+struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+{
+ struct crypto_alg *alg;
+
+ alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
+ return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
+}
+EXPORT_SYMBOL_GPL(ahash_attr_alg);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index df0863d5699..f149b1c8b76 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -81,16 +81,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
crypto_tmpl_put(tmpl);
}
+static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
+ struct list_head *stack,
+ struct list_head *top,
+ struct list_head *secondary_spawns)
+{
+ struct crypto_spawn *spawn, *n;
+
+ if (list_empty(stack))
+ return NULL;
+
+ spawn = list_first_entry(stack, struct crypto_spawn, list);
+ n = list_entry(spawn->list.next, struct crypto_spawn, list);
+
+ if (spawn->alg && &n->list != stack && !n->alg)
+ n->alg = (n->list.next == stack) ? alg :
+ &list_entry(n->list.next, struct crypto_spawn,
+ list)->inst->alg;
+
+ list_move(&spawn->list, secondary_spawns);
+
+ return &n->list == stack ? top : &n->inst->alg.cra_users;
+}
+
static void crypto_remove_spawn(struct crypto_spawn *spawn,
- struct list_head *list,
- struct list_head *secondary_spawns)
+ struct list_head *list)
{
struct crypto_instance *inst = spawn->inst;
struct crypto_template *tmpl = inst->tmpl;
- list_del_init(&spawn->list);
- spawn->alg = NULL;
-
if (crypto_is_dead(&inst->alg))
return;
@@ -106,25 +125,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
- list_splice(&inst->alg.cra_users, secondary_spawns);
+ BUG_ON(!list_empty(&inst->alg.cra_users));
}
-static void crypto_remove_spawns(struct list_head *spawns,
- struct list_head *list, u32 new_type)
+static void crypto_remove_spawns(struct crypto_alg *alg,
+ struct list_head *list,
+ struct crypto_alg *nalg)
{
+ u32 new_type = (nalg ?: alg)->cra_flags;
struct crypto_spawn *spawn, *n;
LIST_HEAD(secondary_spawns);
+ struct list_head *spawns;
+ LIST_HEAD(stack);
+ LIST_HEAD(top);
+ spawns = &alg->cra_users;
list_for_each_entry_safe(spawn, n, spawns, list) {
if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
continue;
- crypto_remove_spawn(spawn, list, &secondary_spawns);
+ list_move(&spawn->list, &top);
}
- while (!list_empty(&secondary_spawns)) {
- list_for_each_entry_safe(spawn, n, &secondary_spawns, list)
- crypto_remove_spawn(spawn, list, &secondary_spawns);
+ spawns = &top;
+ do {
+ while (!list_empty(spawns)) {
+ struct crypto_instance *inst;
+
+ spawn = list_first_entry(spawns, struct crypto_spawn,
+ list);
+ inst = spawn->inst;
+
+ BUG_ON(&inst->alg == alg);
+
+ list_move(&spawn->list, &stack);
+
+ if (&inst->alg == nalg)
+ break;
+
+ spawn->alg = NULL;
+ spawns = &inst->alg.cra_users;
+ }
+ } while ((spawns = crypto_more_spawns(alg, &stack, &top,
+ &secondary_spawns)));
+
+ list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
+ if (spawn->alg)
+ list_move(&spawn->list, &spawn->alg->cra_users);
+ else
+ crypto_remove_spawn(spawn, list);
}
}
@@ -258,7 +307,7 @@ found:
q->cra_priority > alg->cra_priority)
continue;
- crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags);
+ crypto_remove_spawns(q, &list, alg);
}
complete:
@@ -330,7 +379,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
- crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags);
+ crypto_remove_spawns(alg, list, NULL);
return 0;
}
@@ -488,20 +537,38 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
}
EXPORT_SYMBOL_GPL(crypto_init_spawn);
+int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
+ struct crypto_instance *inst,
+ const struct crypto_type *frontend)
+{
+ int err = -EINVAL;
+
+ if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+ goto out;
+
+ spawn->frontend = frontend;
+ err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_init_spawn2);
+
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
+ if (!spawn->alg)
+ return;
+
down_write(&crypto_alg_sem);
list_del(&spawn->list);
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
-struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
- u32 mask)
+static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;
- struct crypto_tfm *tfm;
down_read(&crypto_alg_sem);
alg = spawn->alg;
@@ -516,6 +583,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
return ERR_PTR(-EAGAIN);
}
+ return alg;
+}
+
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+ u32 mask)
+{
+ struct crypto_alg *alg;
+ struct crypto_tfm *tfm;
+
+ alg = crypto_spawn_alg(spawn);
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
tfm = ERR_PTR(-EINVAL);
if (unlikely((alg->cra_flags ^ type) & mask))
goto out_put_alg;
@@ -532,6 +612,27 @@ out_put_alg:
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
+void *crypto_spawn_tfm2(struct crypto_spawn *spawn)
+{
+ struct crypto_alg *alg;
+ struct crypto_tfm *tfm;
+
+ alg = crypto_spawn_alg(spawn);
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
+ tfm = crypto_create_tfm(alg, spawn->frontend);
+ if (IS_ERR(tfm))
+ goto out_put_alg;
+
+ return tfm;
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+
int crypto_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&crypto_chain, nb);
@@ -595,7 +696,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask)
{
const char *name;
int err;
@@ -605,9 +708,9 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
if (IS_ERR(name))
return ERR_PTR(err);
- return crypto_alg_mod_lookup(name, type, mask);
+ return crypto_find_alg(name, frontend, type, mask);
}
-EXPORT_SYMBOL_GPL(crypto_attr_alg);
+EXPORT_SYMBOL_GPL(crypto_attr_alg2);
int crypto_attr_u32(struct rtattr *rta, u32 *num)
{
@@ -627,17 +730,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
}
EXPORT_SYMBOL_GPL(crypto_attr_u32);
-struct crypto_instance *crypto_alloc_instance(const char *name,
- struct crypto_alg *alg)
+void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
+ unsigned int head)
{
struct crypto_instance *inst;
- struct crypto_spawn *spawn;
+ char *p;
int err;
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
+ p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
+ GFP_KERNEL);
+ if (!p)
return ERR_PTR(-ENOMEM);
+ inst = (void *)(p + head);
+
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
@@ -647,6 +753,25 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
+ return p;
+
+err_free_inst:
+ kfree(p);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
+
+struct crypto_instance *crypto_alloc_instance(const char *name,
+ struct crypto_alg *alg)
+{
+ struct crypto_instance *inst;
+ struct crypto_spawn *spawn;
+ int err;
+
+ inst = crypto_alloc_instance2(name, alg, 0);
+ if (IS_ERR(inst))
+ goto out;
+
spawn = crypto_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
@@ -658,7 +783,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
err_free_inst:
kfree(inst);
- return ERR_PTR(err);
+ inst = ERR_PTR(err);
+
+out:
+ return inst;
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance);
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 9908dd830c2..412241ce4cf 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -68,6 +68,11 @@ static int cryptomgr_probe(void *data)
goto err;
do {
+ if (tmpl->create) {
+ err = tmpl->create(tmpl, param->tb);
+ continue;
+ }
+
inst = tmpl->alloc(param->tb);
if (IS_ERR(inst))
err = PTR_ERR(inst);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index d80ed4c1e00..3aa6e3834bf 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -187,7 +187,6 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
/* Our exported functions */
static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
{
- unsigned long flags;
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
@@ -196,7 +195,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
if (nbytes < 0)
return -EINVAL;
- spin_lock_irqsave(&ctx->prng_lock, flags);
+ spin_lock_bh(&ctx->prng_lock);
err = -EINVAL;
if (ctx->flags & PRNG_NEED_RESET)
@@ -268,7 +267,7 @@ empty_rbuf:
goto remainder;
done:
- spin_unlock_irqrestore(&ctx->prng_lock, flags);
+ spin_unlock_bh(&ctx->prng_lock);
dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
err, ctx);
return err;
@@ -284,10 +283,9 @@ static int reset_prng_context(struct prng_context *ctx,
unsigned char *V, unsigned char *DT)
{
int ret;
- int rc = -EINVAL;
unsigned char *prng_key;
- spin_lock(&ctx->prng_lock);
+ spin_lock_bh(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
@@ -308,34 +306,20 @@ static int reset_prng_context(struct prng_context *ctx,
memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
- if (ctx->tfm)
- crypto_free_cipher(ctx->tfm);
-
- ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tfm)) {
- dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
- ctx);
- ctx->tfm = NULL;
- goto out;
- }
-
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
if (ret) {
dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
crypto_cipher_get_flags(ctx->tfm));
- crypto_free_cipher(ctx->tfm);
goto out;
}
- rc = 0;
+ ret = 0;
ctx->flags &= ~PRNG_NEED_RESET;
out:
- spin_unlock(&ctx->prng_lock);
-
- return rc;
-
+ spin_unlock_bh(&ctx->prng_lock);
+ return ret;
}
static int cprng_init(struct crypto_tfm *tfm)
@@ -343,6 +327,12 @@ static int cprng_init(struct crypto_tfm *tfm)
struct prng_context *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->prng_lock);
+ ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(ctx->tfm)) {
+ dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
+ ctx);
+ return PTR_ERR(ctx->tfm);
+ }
if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
return -EINVAL;
@@ -418,17 +408,10 @@ static struct crypto_alg rng_alg = {
/* Module initalization */
static int __init prng_mod_init(void)
{
- int ret = 0;
-
if (fips_enabled)
rng_alg.cra_priority += 200;
- ret = crypto_register_alg(&rng_alg);
-
- if (ret)
- goto out;
-out:
- return 0;
+ return crypto_register_alg(&rng_alg);
}
static void __exit prng_mod_fini(void)
diff --git a/crypto/api.c b/crypto/api.c
index d5944f92b41..798526d9053 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -285,13 +285,6 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
return crypto_init_cipher_ops(tfm);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
- CRYPTO_ALG_TYPE_HASH_MASK)
- return crypto_init_digest_ops_async(tfm);
- else
- return crypto_init_digest_ops(tfm);
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_ops(tfm);
@@ -318,11 +311,7 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
case CRYPTO_ALG_TYPE_CIPHER:
crypto_exit_cipher_ops(tfm);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- crypto_exit_digest_ops(tfm);
- break;
-
+
case CRYPTO_ALG_TYPE_COMPRESS:
crypto_exit_compress_ops(tfm);
break;
@@ -349,11 +338,7 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
case CRYPTO_ALG_TYPE_CIPHER:
len += crypto_cipher_ctxsize(alg);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- len += crypto_digest_ctxsize(alg);
- break;
-
+
case CRYPTO_ALG_TYPE_COMPRESS:
len += crypto_compress_ctxsize(alg);
break;
@@ -472,7 +457,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
int err = -ENOMEM;
tfmsize = frontend->tfmsize;
- total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend);
+ total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
mem = kzalloc(total, GFP_KERNEL);
if (mem == NULL)
@@ -481,7 +466,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
- err = frontend->init_tfm(tfm, frontend);
+ err = frontend->init_tfm(tfm);
if (err)
goto out_free_tfm;
@@ -503,6 +488,27 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_create_tfm);
+struct crypto_alg *crypto_find_alg(const char *alg_name,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask)
+{
+ struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
+ crypto_alg_mod_lookup;
+
+ if (frontend) {
+ type &= frontend->maskclear;
+ mask &= frontend->maskclear;
+ type |= frontend->type;
+ mask |= frontend->maskset;
+
+ if (frontend->lookup)
+ lookup = frontend->lookup;
+ }
+
+ return lookup(alg_name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_find_alg);
+
/*
* crypto_alloc_tfm - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
@@ -526,21 +532,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask)
{
- struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
void *tfm;
int err;
- type &= frontend->maskclear;
- mask &= frontend->maskclear;
- type |= frontend->type;
- mask |= frontend->maskset;
-
- lookup = frontend->lookup ?: crypto_alg_mod_lookup;
-
for (;;) {
struct crypto_alg *alg;
- alg = lookup(alg_name, type, mask);
+ alg = crypto_find_alg(alg_name, frontend, type, mask);
if (IS_ERR(alg)) {
err = PTR_ERR(alg);
goto err;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 5793b64c81a..4d6f49a5dae 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -23,24 +23,36 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
+
struct authenc_instance_ctx {
- struct crypto_spawn auth;
+ struct crypto_ahash_spawn auth;
struct crypto_skcipher_spawn enc;
};
struct crypto_authenc_ctx {
- spinlock_t auth_lock;
- struct crypto_hash *auth;
+ unsigned int reqoff;
+ struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
};
+struct authenc_request_ctx {
+ unsigned int cryptlen;
+ struct scatterlist *sg;
+ struct scatterlist asg[2];
+ struct scatterlist cipher[2];
+ crypto_completion_t complete;
+ crypto_completion_t update_complete;
+ char tail[];
+};
+
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
unsigned int authkeylen;
unsigned int enckeylen;
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_hash *auth = ctx->auth;
+ struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
@@ -64,11 +76,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
authkeylen = keylen - enckeylen;
- crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
- crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) &
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_hash_setkey(auth, key, authkeylen);
- crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) &
+ err = crypto_ahash_setkey(auth, key, authkeylen);
+ crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
if (err)
@@ -103,40 +115,198 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
sg_mark_end(head);
}
-static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags,
- struct scatterlist *cipher,
- unsigned int cryptlen)
+static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc), 1);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc), 1);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
+ int err)
{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_hash *auth = ctx->auth;
- struct hash_desc desc = {
- .tfm = auth,
- .flags = aead_request_flags(req) & flags,
- };
- u8 *hash = aead_request_ctx(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc);
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static void authenc_verify_ahash_done(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc);
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
+{
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ u8 *hash = areq_ctx->tail;
int err;
- hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth),
- crypto_hash_alignmask(auth) + 1);
+ hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
+ ahash_request_set_tfm(ahreq, auth);
- spin_lock_bh(&ctx->auth_lock);
- err = crypto_hash_init(&desc);
+ err = crypto_ahash_init(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->update_complete, req);
- err = crypto_hash_update(&desc, req->assoc, req->assoclen);
+ err = crypto_ahash_update(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->complete, req);
- err = crypto_hash_update(&desc, cipher, cryptlen);
+ err = crypto_ahash_finup(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
- err = crypto_hash_final(&desc, hash);
-auth_unlock:
- spin_unlock_bh(&ctx->auth_lock);
+ return hash;
+}
+
+static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
+{
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ u8 *hash = areq_ctx->tail;
+ int err;
+ hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_digest(ahreq);
if (err)
return ERR_PTR(err);
@@ -147,11 +317,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
unsigned int flags)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *dst = req->dst;
- struct scatterlist cipher[2];
- struct page *dstp;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *asg = areq_ctx->asg;
unsigned int ivsize = crypto_aead_ivsize(authenc);
- unsigned int cryptlen;
+ unsigned int cryptlen = req->cryptlen;
+ authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
+ struct page *dstp;
u8 *vdst;
u8 *hash;
@@ -163,10 +337,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
sg_set_buf(cipher, iv, ivsize);
authenc_chain(cipher, dst, vdst == iv + ivsize);
dst = cipher;
+ cryptlen += ivsize;
}
- cryptlen = req->cryptlen + ivsize;
- hash = crypto_authenc_hash(req, flags, dst, cryptlen);
+ if (sg_is_last(assoc)) {
+ authenc_ahash_fn = crypto_authenc_ahash;
+ sg_init_table(asg, 2);
+ sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
+ authenc_chain(asg, dst, 0);
+ dst = asg;
+ cryptlen += req->assoclen;
+ }
+
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->sg = dst;
+
+ areq_ctx->complete = authenc_geniv_ahash_done;
+ areq_ctx->update_complete = authenc_geniv_ahash_update_done;
+
+ hash = authenc_ahash_fn(req, flags);
if (IS_ERR(hash))
return PTR_ERR(hash);
@@ -256,22 +445,25 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
}
static int crypto_authenc_verify(struct aead_request *req,
- struct scatterlist *cipher,
- unsigned int cryptlen)
+ authenc_ahash_t authenc_ahash_fn)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
u8 *ohash;
u8 *ihash;
unsigned int authsize;
- ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher,
- cryptlen);
+ areq_ctx->complete = authenc_verify_ahash_done;
+ areq_ctx->complete = authenc_verify_ahash_update_done;
+
+ ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
if (IS_ERR(ohash))
return PTR_ERR(ohash);
authsize = crypto_aead_authsize(authenc);
ihash = ohash + authsize;
- scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0);
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
}
@@ -279,10 +471,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
unsigned int cryptlen)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *src = req->src;
- struct scatterlist cipher[2];
- struct page *srcp;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *asg = areq_ctx->asg;
unsigned int ivsize = crypto_aead_ivsize(authenc);
+ authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
+ struct page *srcp;
u8 *vsrc;
srcp = sg_page(src);
@@ -293,9 +489,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
sg_set_buf(cipher, iv, ivsize);
authenc_chain(cipher, src, vsrc == iv + ivsize);
src = cipher;
+ cryptlen += ivsize;
+ }
+
+ if (sg_is_last(assoc)) {
+ authenc_ahash_fn = crypto_authenc_ahash;
+ sg_init_table(asg, 2);
+ sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
+ authenc_chain(asg, src, 0);
+ src = asg;
+ cryptlen += req->assoclen;
}
- return crypto_authenc_verify(req, src, cryptlen + ivsize);
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->sg = src;
+
+ return crypto_authenc_verify(req, authenc_ahash_fn);
}
static int crypto_authenc_decrypt(struct aead_request *req)
@@ -326,38 +535,41 @@ static int crypto_authenc_decrypt(struct aead_request *req)
static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_hash *auth;
+ struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
int err;
- auth = crypto_spawn_hash(&ictx->auth);
+ auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
+ ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
+ crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
- goto err_free_hash;
+ goto err_free_ahash;
ctx->auth = auth;
ctx->enc = enc;
+
tfm->crt_aead.reqsize = max_t(unsigned int,
- (crypto_hash_alignmask(auth) &
- ~(crypto_tfm_ctx_alignment() - 1)) +
- crypto_hash_digestsize(auth) * 2,
- sizeof(struct skcipher_givcrypt_request) +
- crypto_ablkcipher_reqsize(enc) +
- crypto_ablkcipher_ivsize(enc));
-
- spin_lock_init(&ctx->auth_lock);
+ crypto_ahash_reqsize(auth) + ctx->reqoff +
+ sizeof(struct authenc_request_ctx) +
+ sizeof(struct ahash_request),
+ sizeof(struct skcipher_givcrypt_request) +
+ crypto_ablkcipher_reqsize(enc) +
+ crypto_ablkcipher_ivsize(enc));
return 0;
-err_free_hash:
- crypto_free_hash(auth);
+err_free_ahash:
+ crypto_free_ahash(auth);
return err;
}
@@ -365,7 +577,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx->auth);
+ crypto_free_ahash(ctx->auth);
crypto_free_ablkcipher(ctx->enc);
}
@@ -373,7 +585,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
- struct crypto_alg *auth;
+ struct hash_alg_common *auth;
+ struct crypto_alg *auth_base;
struct crypto_alg *enc;
struct authenc_instance_ctx *ctx;
const char *enc_name;
@@ -387,11 +600,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
- auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
+ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
if (IS_ERR(auth))
return ERR_PTR(PTR_ERR(auth));
+ auth_base = &auth->base;
+
enc_name = crypto_attr_alg_name(tb[2]);
err = PTR_ERR(enc_name);
if (IS_ERR(enc_name))
@@ -404,7 +619,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
ctx = crypto_instance_ctx(inst);
- err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK);
+ err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
if (err)
goto err_free_inst;
@@ -419,28 +634,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
- "authenc(%s,%s)", auth->cra_name, enc->cra_name) >=
+ "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "authenc(%s,%s)", auth->cra_driver_name,
+ "authenc(%s,%s)", auth_base->cra_driver_name,
enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority;
+ inst->alg.cra_priority = enc->cra_priority *
+ 10 + auth_base->cra_priority;
inst->alg.cra_blocksize = enc->cra_blocksize;
- inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask;
+ inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
inst->alg.cra_type = &crypto_aead_type;
inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
- inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ?
- auth->cra_hash.digestsize :
- auth->cra_type ?
- __crypto_shash_alg(auth)->digestsize :
- auth->cra_digest.dia_digestsize;
+ inst->alg.cra_aead.maxauthsize = auth->digestsize;
inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
@@ -453,13 +665,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
out:
- crypto_mod_put(auth);
+ crypto_mod_put(auth_base);
return inst;
err_drop_enc:
crypto_drop_skcipher(&ctx->enc);
err_drop_auth:
- crypto_drop_spawn(&ctx->auth);
+ crypto_drop_ahash(&ctx->auth);
err_free_inst:
kfree(inst);
out_put_auth:
@@ -472,7 +684,7 @@ static void crypto_authenc_free(struct crypto_instance *inst)
struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->enc);
- crypto_drop_spawn(&ctx->auth);
+ crypto_drop_ahash(&ctx->auth);
kfree(inst);
}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ae5fa99d5d3..35335825a4e 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,11 @@ struct cryptd_instance_ctx {
struct cryptd_queue *queue;
};
+struct hashd_instance_ctx {
+ struct crypto_shash_spawn spawn;
+ struct cryptd_queue *queue;
+};
+
struct cryptd_blkcipher_ctx {
struct crypto_blkcipher *child;
};
@@ -48,11 +53,12 @@ struct cryptd_blkcipher_request_ctx {
};
struct cryptd_hash_ctx {
- struct crypto_hash *child;
+ struct crypto_shash *child;
};
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
+ struct shash_desc desc;
};
static void cryptd_queue_worker(struct work_struct *work);
@@ -249,32 +255,24 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
crypto_free_blkcipher(ctx->child);
}
-static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
- struct cryptd_queue *queue)
+static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
+ unsigned int tail)
{
+ char *p;
struct crypto_instance *inst;
- struct cryptd_instance_ctx *ctx;
int err;
- inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- if (!inst) {
- inst = ERR_PTR(-ENOMEM);
- goto out;
- }
+ p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ inst = (void *)(p + head);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_inst;
- ctx = crypto_instance_ctx(inst);
- err = crypto_init_spawn(&ctx->spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
-
- ctx->queue = queue;
-
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_priority = alg->cra_priority + 50;
@@ -282,29 +280,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
inst->alg.cra_alignmask = alg->cra_alignmask;
out:
- return inst;
+ return p;
out_free_inst:
- kfree(inst);
- inst = ERR_PTR(err);
+ kfree(p);
+ p = ERR_PTR(err);
goto out;
}
-static struct crypto_instance *cryptd_alloc_blkcipher(
- struct rtattr **tb, struct cryptd_queue *queue)
+static int cryptd_create_blkcipher(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ struct cryptd_queue *queue)
{
+ struct cryptd_instance_ctx *ctx;
struct crypto_instance *inst;
struct crypto_alg *alg;
+ int err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ return PTR_ERR(alg);
- inst = cryptd_alloc_instance(alg, queue);
+ inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
+ ctx = crypto_instance_ctx(inst);
+ ctx->queue = queue;
+
+ err = crypto_init_spawn(&ctx->spawn, alg, inst,
+ CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+ if (err)
+ goto out_free_inst;
+
inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ablkcipher_type;
@@ -323,26 +333,34 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_spawn(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
+
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_spawn *spawn = &ictx->spawn;
+ struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_shash_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_hash *cipher;
+ struct crypto_shash *hash;
- cipher = crypto_spawn_hash(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
+ hash = crypto_spawn_shash(spawn);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
- ctx->child = cipher;
- tfm->crt_ahash.reqsize =
- sizeof(struct cryptd_hash_request_ctx);
+ ctx->child = hash;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct cryptd_hash_request_ctx) +
+ crypto_shash_descsize(hash));
return 0;
}
@@ -350,22 +368,22 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx->child);
+ crypto_free_shash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_hash *child = ctx->child;
+ struct crypto_shash *child = ctx->child;
int err;
- crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_hash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_shash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
@@ -385,21 +403,19 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
-
- rctx = ahash_request_ctx(req);
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_crt(child)->init(&desc);
+ err = crypto_shash_init(desc);
req->base.complete = rctx->complete;
@@ -416,23 +432,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req)
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
+ struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_crt(child)->update(&desc,
- req->src,
- req->nbytes);
+ err = shash_ahash_update(req, &rctx->desc);
req->base.complete = rctx->complete;
@@ -449,21 +457,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req)
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
-
- rctx = ahash_request_ctx(req);
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_crt(child)->final(&desc, req->result);
+ err = crypto_shash_final(&rctx->desc, req->result);
req->base.complete = rctx->complete;
@@ -478,26 +478,44 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
-static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- rctx = ahash_request_ctx(req);
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ err = shash_ahash_finup(req, &rctx->desc);
+
+ req->base.complete = rctx->complete;
+
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int cryptd_hash_finup_enqueue(struct ahash_request *req)
+{
+ return cryptd_hash_enqueue(req, cryptd_hash_finup);
+}
+
+static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_crt(child)->digest(&desc,
- req->src,
- req->nbytes,
- req->result);
+ err = shash_ahash_digest(req, desc);
req->base.complete = rctx->complete;
@@ -512,64 +530,108 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
-static struct crypto_instance *cryptd_alloc_hash(
- struct rtattr **tb, struct cryptd_queue *queue)
+static int cryptd_hash_export(struct ahash_request *req, void *out)
{
- struct crypto_instance *inst;
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_export(&rctx->desc, out);
+}
+
+static int cryptd_hash_import(struct ahash_request *req, const void *in)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_import(&rctx->desc, in);
+}
+
+static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ struct cryptd_queue *queue)
+{
+ struct hashd_instance_ctx *ctx;
+ struct ahash_instance *inst;
+ struct shash_alg *salg;
struct crypto_alg *alg;
+ int err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
- if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
- inst = cryptd_alloc_instance(alg, queue);
+ alg = &salg->base;
+ inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
+ sizeof(*ctx));
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
- inst->alg.cra_type = &crypto_ahash_type;
+ ctx = ahash_instance_ctx(inst);
+ ctx->queue = queue;
- inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
- inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+ err = crypto_init_shash_spawn(&ctx->spawn, salg,
+ ahash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
- inst->alg.cra_init = cryptd_hash_init_tfm;
- inst->alg.cra_exit = cryptd_hash_exit_tfm;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
- inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
- inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
- inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
- inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
- inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
+ inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+
+ inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
+ inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+
+ inst->alg.init = cryptd_hash_init_enqueue;
+ inst->alg.update = cryptd_hash_update_enqueue;
+ inst->alg.final = cryptd_hash_final_enqueue;
+ inst->alg.finup = cryptd_hash_finup_enqueue;
+ inst->alg.export = cryptd_hash_export;
+ inst->alg.import = cryptd_hash_import;
+ inst->alg.setkey = cryptd_hash_setkey;
+ inst->alg.digest = cryptd_hash_digest_enqueue;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_shash(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static struct cryptd_queue queue;
-static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
+static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- return cryptd_alloc_blkcipher(tb, &queue);
+ return cryptd_create_blkcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
- return cryptd_alloc_hash(tb, &queue);
+ return cryptd_create_hash(tmpl, tb, &queue);
}
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
static void cryptd_free(struct crypto_instance *inst)
{
struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
+ struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+
+ switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_drop_shash(&hctx->spawn);
+ kfree(ahash_instance(inst));
+ return;
+ }
crypto_drop_spawn(&ctx->spawn);
kfree(inst);
@@ -577,7 +639,7 @@ static void cryptd_free(struct crypto_instance *inst)
static struct crypto_template cryptd_tmpl = {
.name = "cryptd",
- .alloc = cryptd_alloc,
+ .create = cryptd_create,
.free = cryptd_free,
.module = THIS_MODULE,
};
@@ -620,6 +682,41 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
+struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_ahash *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_ahash(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return __cryptd_ahash_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
+
+struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
+{
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_child);
+
+void cryptd_free_ahash(struct cryptd_ahash *tfm)
+{
+ crypto_free_ahash(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_ahash);
+
static int __init cryptd_init(void)
{
int err;
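
The cryptd_alloc_ahash()/cryptd_ahash_child()/cryptd_free_ahash() trio exported above lets a driver wrap a synchronous shash in an asynchronous cryptd worker while still reaching the underlying shash for fallback paths. A minimal out-of-tree sketch of the intended calling pattern; the "sha1" name and the pr_info() are illustrative only, not part of this patch:

#include <linux/err.h>
#include <linux/kernel.h>
#include <crypto/cryptd.h>
#include <crypto/hash.h>

static int cryptd_ahash_example(void)
{
	struct cryptd_ahash *ctfm;

	/* Instantiates "cryptd(sha1)" and returns the async wrapper. */
	ctfm = cryptd_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* The wrapped shash stays reachable for synchronous fallbacks. */
	pr_info("cryptd child digest size: %u\n",
		crypto_shash_digestsize(cryptd_ahash_child(ctfm)));

	cryptd_free_ahash(ctfm);
	return 0;
}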
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 2d7425f0e7b..6c3bfabb9d1 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
+ inst->alg.cra_blkcipher.geniv = "chainiv";
+
out:
crypto_mod_put(alg);
return inst;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e70afd0c73d..5fc3292483e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -11,7 +11,10 @@
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
+#include <crypto/hash.h>
+#include "internal.h"
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -21,11 +24,12 @@
struct gcm_instance_ctx {
struct crypto_skcipher_spawn ctr;
+ struct crypto_ahash_spawn ghash;
};
struct crypto_gcm_ctx {
struct crypto_ablkcipher *ctr;
- struct gf128mul_4k *gf128;
+ struct crypto_ahash *ghash;
};
struct crypto_rfc4106_ctx {
@@ -34,10 +38,9 @@ struct crypto_rfc4106_ctx {
};
struct crypto_gcm_ghash_ctx {
- u32 bytes;
- u32 flags;
- struct gf128mul_4k *gf128;
- u8 buffer[16];
+ unsigned int cryptlen;
+ struct scatterlist *src;
+ crypto_completion_t complete;
};
struct crypto_gcm_req_priv_ctx {
@@ -45,8 +48,11 @@ struct crypto_gcm_req_priv_ctx {
u8 iauth_tag[16];
struct scatterlist src[2];
struct scatterlist dst[2];
- struct crypto_gcm_ghash_ctx ghash;
- struct ablkcipher_request abreq;
+ struct crypto_gcm_ghash_ctx ghash_ctx;
+ union {
+ struct ahash_request ahreq;
+ struct ablkcipher_request abreq;
+ } u;
};
struct crypto_gcm_setkey_result {
@@ -54,6 +60,8 @@ struct crypto_gcm_setkey_result {
struct completion completion;
};
+static void *gcm_zeroes;
+
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
@@ -62,113 +70,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
-static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags,
- struct gf128mul_4k *gf128)
-{
- ctx->bytes = 0;
- ctx->flags = flags;
- ctx->gf128 = gf128;
- memset(ctx->buffer, 0, 16);
-}
-
-static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
- const u8 *src, unsigned int srclen)
-{
- u8 *dst = ctx->buffer;
-
- if (ctx->bytes) {
- int n = min(srclen, ctx->bytes);
- u8 *pos = dst + (16 - ctx->bytes);
-
- ctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
-
- if (!ctx->bytes)
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- while (srclen >= 16) {
- crypto_xor(dst, src, 16);
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- src += 16;
- srclen -= 16;
- }
-
- if (srclen) {
- ctx->bytes = 16 - srclen;
- while (srclen--)
- *dst++ ^= *src++;
- }
-}
-
-static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
- struct scatterlist *sg, int len)
-{
- struct scatter_walk walk;
- u8 *src;
- int n;
-
- if (!len)
- return;
-
- scatterwalk_start(&walk, sg);
-
- while (len) {
- n = scatterwalk_clamp(&walk, len);
-
- if (!n) {
- scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
- n = scatterwalk_clamp(&walk, len);
- }
-
- src = scatterwalk_map(&walk, 0);
-
- crypto_gcm_ghash_update(ctx, src, n);
- len -= n;
-
- scatterwalk_unmap(src, 0);
- scatterwalk_advance(&walk, n);
- scatterwalk_done(&walk, 0, len);
- if (len)
- crypto_yield(ctx->flags);
- }
-}
-
-static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx)
-{
- u8 *dst = ctx->buffer;
-
- if (ctx->bytes) {
- u8 *tmp = dst + (16 - ctx->bytes);
-
- while (ctx->bytes--)
- *tmp++ ^= 0;
-
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- ctx->bytes = 0;
-}
-
-static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
- unsigned int authlen,
- unsigned int cryptlen, u8 *dst)
-{
- u8 *buf = ctx->buffer;
- u128 lengths;
-
- lengths.a = cpu_to_be64(authlen * 8);
- lengths.b = cpu_to_be64(cryptlen * 8);
-
- crypto_gcm_ghash_flush(ctx);
- crypto_xor(buf, (u8 *)&lengths, 16);
- gf128mul_4k_lle((be128 *)buf, ctx->gf128);
- crypto_xor(dst, buf, 16);
-}
-
static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
{
struct crypto_gcm_setkey_result *result = req->data;
@@ -184,6 +85,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ struct crypto_ahash *ghash = ctx->ghash;
struct crypto_ablkcipher *ctr = ctx->ctr;
struct {
be128 hash;
@@ -233,13 +135,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err)
goto out;
- if (ctx->gf128 != NULL)
- gf128mul_free_4k(ctx->gf128);
-
- ctx->gf128 = gf128mul_init_4k_lle(&data->hash);
-
- if (ctx->gf128 == NULL)
- err = -ENOMEM;
+ crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
+ crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
+ CRYPTO_TFM_RES_MASK);
out:
kfree(data);
@@ -272,8 +173,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- u32 flags = req->base.tfm->crt_flags;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
struct scatterlist *dst;
__be32 counter = cpu_to_be32(1);
@@ -296,108 +195,398 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
cryptlen + sizeof(pctx->auth_tag),
req->iv);
+}
+
+static inline unsigned int gcm_remain(unsigned int len)
+{
+ len &= 0xfU;
+ return len ? 16 - len : 0;
+}
+
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err);
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
- crypto_gcm_ghash_init(ghash, flags, ctx->gf128);
+static int gcm_hash_update(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx,
+ crypto_completion_t complete,
+ struct scatterlist *src,
+ unsigned int len)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
- crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen);
- crypto_gcm_ghash_flush(ghash);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ complete, req);
+ ahash_request_set_crypt(ahreq, src, NULL, len);
+
+ return crypto_ahash_update(ahreq);
}
-static int crypto_gcm_hash(struct aead_request *req)
+static int gcm_hash_remain(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx,
+ unsigned int remain,
+ crypto_completion_t complete)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ complete, req);
+ sg_init_one(pctx->src, gcm_zeroes, remain);
+ ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
+
+ return crypto_ahash_update(ahreq);
+}
+
+static int gcm_hash_len(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ u128 lengths;
+
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64(gctx->cryptlen * 8);
+ memcpy(pctx->iauth_tag, &lengths, 16);
+ sg_init_one(pctx->src, pctx->iauth_tag, 16);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_len_done, req);
+ ahash_request_set_crypt(ahreq, pctx->src,
+ NULL, sizeof(lengths));
+
+ return crypto_ahash_update(ahreq);
+}
+
+static int gcm_hash_final(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_final_done, req);
+ ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0);
+
+ return crypto_ahash_final(ahreq);
+}
+
+static void gcm_hash_final_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- u8 *auth_tag = pctx->auth_tag;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+
+ if (!err)
+ crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
- crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen);
- crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen,
- auth_tag);
+ gctx->complete(areq, err);
+}
+
+static void gcm_hash_len_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash_final(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_hash_final_done(areq, err);
+}
+
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash_len(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_hash_len_done(areq, err);
+}
+
+static void gcm_hash_crypt_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ unsigned int remain;
+
+ if (!err) {
+ remain = gcm_remain(gctx->cryptlen);
+ BUG_ON(!remain);
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_crypt_remain_done);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_hash_crypt_remain_done(areq, err);
+}
+
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ crypto_completion_t complete;
+ unsigned int remain = 0;
+
+ if (!err && gctx->cryptlen) {
+ remain = gcm_remain(gctx->cryptlen);
+ complete = remain ? gcm_hash_crypt_done :
+ gcm_hash_crypt_remain_done;
+ err = gcm_hash_update(req, pctx, complete,
+ gctx->src, gctx->cryptlen);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ if (remain)
+ gcm_hash_crypt_done(areq, err);
+ else
+ gcm_hash_crypt_remain_done(areq, err);
+}
+
+static void gcm_hash_assoc_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ unsigned int remain;
+
+ if (!err) {
+ remain = gcm_remain(req->assoclen);
+ BUG_ON(!remain);
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_assoc_remain_done);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_hash_assoc_remain_done(areq, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ crypto_completion_t complete;
+ unsigned int remain = 0;
+
+ if (!err && req->assoclen) {
+ remain = gcm_remain(req->assoclen);
+ complete = remain ? gcm_hash_assoc_done :
+ gcm_hash_assoc_remain_done;
+ err = gcm_hash_update(req, pctx, complete,
+ req->assoc, req->assoclen);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ if (remain)
+ gcm_hash_assoc_done(areq, err);
+ else
+ gcm_hash_assoc_remain_done(areq, err);
+}
+
+static int gcm_hash(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int remain;
+ crypto_completion_t complete;
+ int err;
+
+ ahash_request_set_tfm(ahreq, ctx->ghash);
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_init_done, req);
+ err = crypto_ahash_init(ahreq);
+ if (err)
+ return err;
+ remain = gcm_remain(req->assoclen);
+ complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
+ err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
+ if (err)
+ return err;
+ if (remain) {
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_assoc_remain_done);
+ if (err)
+ return err;
+ }
+ remain = gcm_remain(gctx->cryptlen);
+ complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
+ err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
+ if (err)
+ return err;
+ if (remain) {
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_crypt_remain_done);
+ if (err)
+ return err;
+ }
+ err = gcm_hash_len(req, pctx);
+ if (err)
+ return err;
+ err = gcm_hash_final(req, pctx);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void gcm_enc_copy_hash(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ u8 *auth_tag = pctx->auth_tag;
scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
crypto_aead_authsize(aead), 1);
- return 0;
}
-static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_enc_hash_done(struct crypto_async_request *areq,
+ int err)
{
struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
- err = crypto_gcm_hash(req);
+ gcm_enc_copy_hash(req, pctx);
aead_request_complete(req, err);
}
+static void gcm_encrypt_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_enc_hash_done(areq, err);
+}
+
static int crypto_gcm_encrypt(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->abreq;
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
int err;
crypto_gcm_init_crypt(abreq, req, req->cryptlen);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- crypto_gcm_encrypt_done, req);
+ gcm_encrypt_done, req);
+
+ gctx->src = req->dst;
+ gctx->cryptlen = req->cryptlen;
+ gctx->complete = gcm_enc_hash_done;
err = crypto_ablkcipher_encrypt(abreq);
if (err)
return err;
- return crypto_gcm_hash(req);
+ err = gcm_hash(req, pctx);
+ if (err)
+ return err;
+
+ crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+ gcm_enc_copy_hash(req, pctx);
+
+ return 0;
}
-static int crypto_gcm_verify(struct aead_request *req)
+static int crypto_gcm_verify(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
u8 *auth_tag = pctx->auth_tag;
u8 *iauth_tag = pctx->iauth_tag;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen - authsize;
- crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag);
-
- authsize = crypto_aead_authsize(aead);
+ crypto_xor(auth_tag, iauth_tag, 16);
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
-static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
- err = crypto_gcm_verify(req);
+ err = crypto_gcm_verify(req, pctx);
aead_request_complete(req, err);
}
+static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+
+ if (!err) {
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ gcm_decrypt_done, req);
+ crypto_gcm_init_crypt(abreq, req, gctx->cryptlen);
+ err = crypto_ablkcipher_decrypt(abreq);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_decrypt_done(areq, err);
+}
+
static int crypto_gcm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->abreq;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
- unsigned int cryptlen = req->cryptlen;
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int authsize = crypto_aead_authsize(aead);
+ unsigned int cryptlen = req->cryptlen;
int err;
if (cryptlen < authsize)
return -EINVAL;
cryptlen -= authsize;
- crypto_gcm_init_crypt(abreq, req, cryptlen);
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- crypto_gcm_decrypt_done, req);
+ gctx->src = req->src;
+ gctx->cryptlen = cryptlen;
+ gctx->complete = gcm_dec_hash_done;
- crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen);
+ err = gcm_hash(req, pctx);
+ if (err)
+ return err;
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ gcm_decrypt_done, req);
+ crypto_gcm_init_crypt(abreq, req, cryptlen);
err = crypto_ablkcipher_decrypt(abreq);
if (err)
return err;
- return crypto_gcm_verify(req);
+ return crypto_gcm_verify(req, pctx);
}
static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
@@ -406,43 +595,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ablkcipher *ctr;
+ struct crypto_ahash *ghash;
unsigned long align;
int err;
+ ghash = crypto_spawn_ahash(&ictx->ghash);
+ if (IS_ERR(ghash))
+ return PTR_ERR(ghash);
+
ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
- return err;
+ goto err_free_hash;
ctx->ctr = ctr;
- ctx->gf128 = NULL;
+ ctx->ghash = ghash;
align = crypto_tfm_alg_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
tfm->crt_aead.reqsize = align +
- sizeof(struct crypto_gcm_req_priv_ctx) +
- crypto_ablkcipher_reqsize(ctr);
+ offsetof(struct crypto_gcm_req_priv_ctx, u) +
+ max(sizeof(struct ablkcipher_request) +
+ crypto_ablkcipher_reqsize(ctr),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(ghash));
return 0;
+
+err_free_hash:
+ crypto_free_ahash(ghash);
+ return err;
}
static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->gf128 != NULL)
- gf128mul_free_4k(ctx->gf128);
-
+ crypto_free_ahash(ctx->ghash);
crypto_free_ablkcipher(ctx->ctr);
}
static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
const char *full_name,
- const char *ctr_name)
+ const char *ctr_name,
+ const char *ghash_name)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
struct crypto_alg *ctr;
+ struct crypto_alg *ghash_alg;
+ struct ahash_alg *ghash_ahash_alg;
struct gcm_instance_ctx *ctx;
int err;
@@ -454,17 +656,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
+ ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
+ CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ err = PTR_ERR(ghash_alg);
+ if (IS_ERR(ghash_alg))
+ return ERR_PTR(err);
+
+ err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
- return ERR_PTR(-ENOMEM);
+ goto out_put_ghash;
ctx = crypto_instance_ctx(inst);
+ ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base);
+ err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg,
+ inst);
+ if (err)
+ goto err_free_inst;
+
crypto_set_skcipher_spawn(&ctx->ctr, inst);
err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
if (err)
- goto err_free_inst;
+ goto err_drop_ghash;
ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
@@ -479,7 +695,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "gcm_base(%s)", ctr->cra_driver_name) >=
+ "gcm_base(%s,%s)", ctr->cra_driver_name,
+ ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
@@ -502,12 +719,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
out:
+ crypto_mod_put(ghash_alg);
return inst;
out_put_ctr:
crypto_drop_skcipher(&ctx->ctr);
+err_drop_ghash:
+ crypto_drop_ahash(&ctx->ghash);
err_free_inst:
kfree(inst);
+out_put_ghash:
inst = ERR_PTR(err);
goto out;
}
@@ -532,7 +753,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
- return crypto_gcm_alloc_common(tb, full_name, ctr_name);
+ return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash");
}
static void crypto_gcm_free(struct crypto_instance *inst)
@@ -540,6 +761,7 @@ static void crypto_gcm_free(struct crypto_instance *inst)
struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->ctr);
+ crypto_drop_ahash(&ctx->ghash);
kfree(inst);
}
@@ -554,6 +776,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
{
int err;
const char *ctr_name;
+ const char *ghash_name;
char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
@@ -561,11 +784,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
if (IS_ERR(ctr_name))
return ERR_PTR(err);
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)",
- ctr_name) >= CRYPTO_MAX_ALG_NAME)
+ ghash_name = crypto_attr_alg_name(tb[2]);
+ err = PTR_ERR(ghash_name);
+ if (IS_ERR(ghash_name))
+ return ERR_PTR(err);
+
+ if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
+ ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
- return crypto_gcm_alloc_common(tb, full_name, ctr_name);
+ return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name);
}
static struct crypto_template crypto_gcm_base_tmpl = {
@@ -784,6 +1012,10 @@ static int __init crypto_gcm_module_init(void)
{
int err;
+ gcm_zeroes = kzalloc(16, GFP_KERNEL);
+ if (!gcm_zeroes)
+ return -ENOMEM;
+
err = crypto_register_template(&crypto_gcm_base_tmpl);
if (err)
goto out;
@@ -796,18 +1028,20 @@ static int __init crypto_gcm_module_init(void)
if (err)
goto out_undo_gcm;
-out:
- return err;
+ return 0;
out_undo_gcm:
crypto_unregister_template(&crypto_gcm_tmpl);
out_undo_base:
crypto_unregister_template(&crypto_gcm_base_tmpl);
- goto out;
+out:
+ kfree(gcm_zeroes);
+ return err;
}
static void __exit crypto_gcm_module_exit(void)
{
+ kfree(gcm_zeroes);
crypto_unregister_template(&crypto_rfc4106_tmpl);
crypto_unregister_template(&crypto_gcm_tmpl);
crypto_unregister_template(&crypto_gcm_base_tmpl);
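
With the hashing moved into a ghash spawn, gcm_base now takes two parameters and instance driver names become gcm_base(<ctr driver>,<ghash driver>), while the user-visible gcm(aes) spelling is unchanged. A rough illustration of allocating and keying the resulting AEAD; the key bytes and authsize below are placeholders:

#include <linux/err.h>
#include <linux/crypto.h>

static int gcm_alloc_example(void)
{
	static const u8 key[16];	/* placeholder AES-128 key */
	struct crypto_aead *tfm;
	int err;

	/* Resolved internally via gcm_base(ctr(aes),ghash). */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);

	crypto_free_aead(tfm);
	return err;
}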
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
new file mode 100644
index 00000000000..be442561693
--- /dev/null
+++ b/crypto/ghash-generic.c
@@ -0,0 +1,170 @@
+/*
+ * GHASH: digest algorithm for GCM (Galois/Counter Mode).
+ *
+ * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
+ * Copyright (c) 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * The algorithm implementation is copied from gcm.c.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/hash.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_ctx {
+ struct gf128mul_4k *gf128;
+};
+
+struct ghash_desc_ctx {
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+};
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ memset(dctx, 0, sizeof(*dctx));
+
+ return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx->gf128)
+ gf128mul_free_4k(ctx->gf128);
+ ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+ if (!ctx->gf128)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *dst = dctx->buffer;
+
+ if (dctx->bytes) {
+ int n = min(srclen, dctx->bytes);
+ u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ dctx->bytes -= n;
+ srclen -= n;
+
+ while (n--)
+ *pos++ ^= *src++;
+
+ if (!dctx->bytes)
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ }
+
+ while (srclen >= GHASH_BLOCK_SIZE) {
+ crypto_xor(dst, src, GHASH_BLOCK_SIZE);
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ src += GHASH_BLOCK_SIZE;
+ srclen -= GHASH_BLOCK_SIZE;
+ }
+
+ if (srclen) {
+ dctx->bytes = GHASH_BLOCK_SIZE - srclen;
+ while (srclen--)
+ *dst++ ^= *src++;
+ }
+
+ return 0;
+}
+
+static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+{
+ u8 *dst = dctx->buffer;
+
+ if (dctx->bytes) {
+ u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ while (dctx->bytes--)
+ *tmp++ ^= 0;
+
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ }
+
+ dctx->bytes = 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *buf = dctx->buffer;
+
+ ghash_flush(ctx, dctx);
+ memcpy(dst, buf, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static void ghash_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+ if (ctx->gf128)
+ gf128mul_free_4k(ctx->gf128);
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
+ .cra_exit = ghash_exit_tfm,
+ },
+};
+
+static int __init ghash_mod_init(void)
+{
+ return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_mod_exit(void)
+{
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_init(ghash_mod_init);
+module_exit(ghash_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
+MODULE_ALIAS("ghash");
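
The standalone ghash shash is keyed with the 16-byte hash subkey H and then consumed like any other synchronous hash, which is how gcm.c now drives it through an ahash spawn. A hedged sketch of direct use; the kmalloc'd descriptor sizing follows the usual shash convention and the inputs are caller-supplied:

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

static int ghash_digest_example(const u8 h[16], const u8 *data,
				unsigned int len, u8 out[16])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* H, the block cipher's encryption of the all-zero block. */
	err = crypto_shash_setkey(tfm, h, 16);
	if (err)
		goto out_free_tfm;

	err = -ENOMEM;
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc)
		goto out_free_tfm;

	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_digest(desc, data, len, out);
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}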
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 0ad39c37496..15c2eb53454 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -27,7 +27,7 @@
#include <linux/string.h>
struct hmac_ctx {
- struct crypto_hash *child;
+ struct crypto_shash *hash;
};
static inline void *align_ptr(void *p, unsigned int align)
@@ -35,65 +35,45 @@ static inline void *align_ptr(void *p, unsigned int align)
return (void *)ALIGN((unsigned long)p, align);
}
-static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm)
+static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
{
- return align_ptr(crypto_hash_ctx_aligned(tfm) +
- crypto_hash_blocksize(tfm) * 2 +
- crypto_hash_digestsize(tfm), sizeof(void *));
+ return align_ptr(crypto_shash_ctx_aligned(tfm) +
+ crypto_shash_statesize(tfm) * 2,
+ crypto_tfm_ctx_alignment());
}
-static int hmac_setkey(struct crypto_hash *parent,
+static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- char *opad = ipad + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct crypto_hash *tfm = ctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *ipad = crypto_shash_ctx_aligned(parent);
+ char *opad = ipad + ss;
+ struct hmac_ctx *ctx = align_ptr(opad + ss,
+ crypto_tfm_ctx_alignment());
+ struct crypto_shash *hash = ctx->hash;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(hash)];
+ } desc;
unsigned int i;
+ desc.shash.tfm = hash;
+ desc.shash.flags = crypto_shash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
if (keylen > bs) {
- struct hash_desc desc;
- struct scatterlist tmp;
- int tmplen;
int err;
- desc.tfm = tfm;
- desc.flags = crypto_hash_get_flags(parent);
- desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_init(&desc);
+ err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad);
if (err)
return err;
- tmplen = bs * 2 + ds;
- sg_init_one(&tmp, ipad, tmplen);
-
- for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) {
- memcpy(ipad, inkey, tmplen);
- err = crypto_hash_update(&desc, &tmp, tmplen);
- if (err)
- return err;
- }
-
- if (keylen) {
- memcpy(ipad, inkey, keylen);
- err = crypto_hash_update(&desc, &tmp, keylen);
- if (err)
- return err;
- }
-
- err = crypto_hash_final(&desc, digest);
- if (err)
- return err;
-
- inkey = digest;
keylen = ds;
- }
+ } else
+ memcpy(ipad, inkey, keylen);
- memcpy(ipad, inkey, keylen);
memset(ipad + keylen, 0, bs - keylen);
memcpy(opad, ipad, bs);
@@ -102,184 +82,178 @@ static int hmac_setkey(struct crypto_hash *parent,
opad[i] ^= 0x5c;
}
- return 0;
+ return crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, ipad, bs) ?:
+ crypto_shash_export(&desc.shash, ipad) ?:
+ crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, opad, bs) ?:
+ crypto_shash_export(&desc.shash, opad);
}
-static int hmac_init(struct hash_desc *pdesc)
+static int hmac_export(struct shash_desc *pdesc, void *out)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist tmp;
- int err;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_one(&tmp, ipad, bs);
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_init(&desc);
- if (unlikely(err))
- return err;
-
- return crypto_hash_update(&desc, &tmp, bs);
+ return crypto_shash_export(desc, out);
}
-static int hmac_update(struct hash_desc *pdesc,
- struct scatterlist *sg, unsigned int nbytes)
+static int hmac_import(struct shash_desc *pdesc, const void *in)
{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
- struct hash_desc desc;
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = ctx->hash;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_hash_update(&desc, sg, nbytes);
+ return crypto_shash_import(desc, in);
}
-static int hmac_final(struct hash_desc *pdesc, u8 *out)
+static int hmac_init(struct shash_desc *pdesc)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *opad = crypto_hash_ctx_aligned(parent) + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist tmp;
- int err;
+ return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
+}
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_one(&tmp, opad, bs + ds);
+static int hmac_update(struct shash_desc *pdesc,
+ const u8 *data, unsigned int nbytes)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- err = crypto_hash_final(&desc, digest);
- if (unlikely(err))
- return err;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_hash_digest(&desc, &tmp, bs + ds, out);
+ return crypto_shash_update(desc, data, nbytes);
}
-static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
- unsigned int nbytes, u8 *out)
+static int hmac_final(struct shash_desc *pdesc, u8 *out)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- char *opad = ipad + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist sg1[2];
- struct scatterlist sg2[1];
- int err;
+ struct crypto_shash *parent = pdesc->tfm;
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_table(sg1, 2);
- sg_set_buf(sg1, ipad, bs);
- scatterwalk_sg_chain(sg1, 2, sg);
+ return crypto_shash_final(desc, out) ?:
+ crypto_shash_import(desc, opad) ?:
+ crypto_shash_finup(desc, out, ds, out);
+}
- sg_init_table(sg2, 1);
- sg_set_buf(sg2, opad, bs + ds);
+static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
+ unsigned int nbytes, u8 *out)
+{
- err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest);
- if (unlikely(err))
- return err;
+ struct crypto_shash *parent = pdesc->tfm;
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- return crypto_hash_digest(&desc, sg2, bs + ds, out);
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_finup(desc, data, nbytes, out) ?:
+ crypto_shash_import(desc, opad) ?:
+ crypto_shash_finup(desc, out, ds, out);
}
static int hmac_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_hash *hash;
+ struct crypto_shash *parent = __crypto_shash_cast(tfm);
+ struct crypto_shash *hash;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
+ struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst);
+ struct hmac_ctx *ctx = hmac_ctx(parent);
- hash = crypto_spawn_hash(spawn);
+ hash = crypto_spawn_shash(spawn);
if (IS_ERR(hash))
return PTR_ERR(hash);
- ctx->child = hash;
+ parent->descsize = sizeof(struct shash_desc) +
+ crypto_shash_descsize(hash);
+
+ ctx->hash = hash;
return 0;
}
static void hmac_exit_tfm(struct crypto_tfm *tfm)
{
- struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
- crypto_free_hash(ctx->child);
+ struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm));
+ crypto_free_shash(ctx->hash);
}
-static void hmac_free(struct crypto_instance *inst)
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
-}
-
-static struct crypto_instance *hmac_alloc(struct rtattr **tb)
-{
- struct crypto_instance *inst;
+ struct shash_instance *inst;
struct crypto_alg *alg;
+ struct shash_alg *salg;
int err;
int ds;
+ int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
- return ERR_PTR(err);
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
-
- inst = ERR_PTR(-EINVAL);
- ds = alg->cra_type == &crypto_hash_type ?
- alg->cra_hash.digestsize :
- alg->cra_type ?
- __crypto_shash_alg(alg)->digestsize :
- alg->cra_digest.dia_digestsize;
- if (ds > alg->cra_blocksize)
+ return err;
+
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
+
+ err = -EINVAL;
+ ds = salg->digestsize;
+ ss = salg->statesize;
+ alg = &salg->base;
+ if (ds > alg->cra_blocksize ||
+ ss < alg->cra_blocksize)
goto out_put_alg;
- inst = crypto_alloc_instance("hmac", alg);
+ inst = shash_alloc_instance("hmac", alg);
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_hash_type;
-
- inst->alg.cra_hash.digestsize = ds;
-
- inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
- ALIGN(inst->alg.cra_blocksize * 2 + ds,
- sizeof(void *));
-
- inst->alg.cra_init = hmac_init_tfm;
- inst->alg.cra_exit = hmac_exit_tfm;
-
- inst->alg.cra_hash.init = hmac_init;
- inst->alg.cra_hash.update = hmac_update;
- inst->alg.cra_hash.final = hmac_final;
- inst->alg.cra_hash.digest = hmac_digest;
- inst->alg.cra_hash.setkey = hmac_setkey;
+ err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg,
+ shash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ ss = ALIGN(ss, alg->cra_alignmask + 1);
+ inst->alg.digestsize = ds;
+ inst->alg.statesize = ss;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
+ ALIGN(ss * 2, crypto_tfm_ctx_alignment());
+
+ inst->alg.base.cra_init = hmac_init_tfm;
+ inst->alg.base.cra_exit = hmac_exit_tfm;
+
+ inst->alg.init = hmac_init;
+ inst->alg.update = hmac_update;
+ inst->alg.final = hmac_final;
+ inst->alg.finup = hmac_finup;
+ inst->alg.export = hmac_export;
+ inst->alg.import = hmac_import;
+ inst->alg.setkey = hmac_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static struct crypto_template hmac_tmpl = {
.name = "hmac",
- .alloc = hmac_alloc,
- .free = hmac_free,
+ .create = hmac_create,
+ .free = shash_free_instance,
.module = THIS_MODULE,
};
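
Because hmac_setkey() now prehashes ipad and opad into exported partial states, every subsequent hmac_init() is just a cheap import rather than a block-sized update. A hedged sketch of incremental use of the converted template; tfm is assumed to be an already-keyed "hmac(sha1)" transform obtained elsewhere:

#include <linux/slab.h>
#include <crypto/hash.h>

static int hmac_incremental_example(struct crypto_shash *tfm,
				    const u8 *p1, unsigned int l1,
				    const u8 *p2, unsigned int l2, u8 *out)
{
	struct shash_desc *desc;
	int err;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* init imports the prehashed ipad; finup appends the opad pass. */
	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, p1, l1) ?:
	      crypto_shash_finup(desc, p2, l2, out);

	kfree(desc);
	return err;
}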
diff --git a/crypto/internal.h b/crypto/internal.h
index 113579a82df..2d226362e59 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -25,12 +25,7 @@
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
-
-#ifdef CONFIG_CRYPTO_FIPS
-extern int fips_enabled;
-#else
-#define fips_enabled 0
-#endif
+#include <linux/fips.h>
/* Crypto notification events. */
enum {
@@ -65,18 +60,6 @@ static inline void crypto_exit_proc(void)
{ }
#endif
-static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg)
-{
- unsigned int len = alg->cra_ctxsize;
-
- if (alg->cra_alignmask) {
- len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
- len += alg->cra_digest.dia_digestsize;
- }
-
- return len;
-}
-
static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
@@ -91,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
-int crypto_init_digest_ops(struct crypto_tfm *tfm);
-int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
-void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);
@@ -111,12 +91,12 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend);
+struct crypto_alg *crypto_find_alg(const char *alg_name,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask);
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask);
-int crypto_register_instance(struct crypto_template *tmpl,
- struct crypto_instance *inst);
-
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index bcadc03726b..f7c4a7d7412 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
-static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
-static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm,
- const struct crypto_type *frontend)
+static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
{
return 0;
}
diff --git a/crypto/rng.c b/crypto/rng.c
index 6e94bc73557..ba05e7380e7 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -123,4 +123,4 @@ void crypto_put_default_rng(void)
EXPORT_SYMBOL_GPL(crypto_put_default_rng);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Random Number Genertor");
+MODULE_DESCRIPTION("Random Number Generator");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 9efef20454c..0416091bf45 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -25,31 +25,21 @@
#include <crypto/sha.h>
#include <asm/byteorder.h>
-struct sha1_ctx {
- u64 count;
- u32 state[5];
- u8 buffer[64];
-};
-
static int sha1_init(struct shash_desc *desc)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
- static const struct sha1_ctx initstate = {
- 0,
- { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- { 0, }
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
- *sctx = initstate;
-
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
const u8 *src;
@@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
u32 i, index, padlen;
__be64 bits;
@@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
return 0;
}
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.final = sha1_final,
- .descsize = sizeof(struct sha1_ctx),
+ .export = sha1_export,
+ .import = sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
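
The new export/import callbacks make the partial sha1 state (struct sha1_state) serialisable, which is what lets wrappers such as hmac and cryptd prehash or hand off a digest in flight. A small sketch under the assumption that d1 and d2 are descriptors already set up for the same sha1 implementation:

#include <crypto/hash.h>
#include <crypto/sha.h>

/* Hash "hello world" in two halves, moving the state between descriptors. */
static int sha1_export_import_example(struct shash_desc *d1,
				      struct shash_desc *d2,
				      u8 out[SHA1_DIGEST_SIZE])
{
	struct sha1_state state;

	return crypto_shash_init(d1) ?:
	       crypto_shash_update(d1, (const u8 *)"hello ", 6) ?:
	       crypto_shash_export(d1, &state) ?:
	       crypto_shash_import(d2, &state) ?:
	       crypto_shash_finup(d2, (const u8 *)"world", 5, out);
}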
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 6349d8339d3..c48459ebf05 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -25,12 +25,6 @@
#include <crypto/sha.h>
#include <asm/byteorder.h>
-struct sha256_ctx {
- u32 count[2];
- u32 state[8];
- u8 buf[128];
-};
-
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
@@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input)
static int sha224_init(struct shash_desc *desc)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1;
sctx->state[2] = SHA224_H2;
@@ -231,15 +225,14 @@ static int sha224_init(struct shash_desc *desc)
sctx->state[5] = SHA224_H5;
sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7;
- sctx->count[0] = 0;
- sctx->count[1] = 0;
+ sctx->count = 0;
return 0;
}
static int sha256_init(struct shash_desc *desc)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
sctx->state[2] = SHA256_H2;
@@ -248,7 +241,7 @@ static int sha256_init(struct shash_desc *desc)
sctx->state[5] = SHA256_H5;
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
- sctx->count[0] = sctx->count[1] = 0;
+ sctx->count = 0;
return 0;
}
@@ -256,58 +249,54 @@ static int sha256_init(struct shash_desc *desc)
static int sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
- unsigned int i, index, part_len;
-
- /* Compute number of bytes mod 128 */
- index = (unsigned int)((sctx->count[0] >> 3) & 0x3f);
-
- /* Update number of bits */
- if ((sctx->count[0] += (len << 3)) < (len << 3)) {
- sctx->count[1]++;
- sctx->count[1] += (len >> 29);
- }
-
- part_len = 64 - index;
-
- /* Transform as many times as possible. */
- if (len >= part_len) {
- memcpy(&sctx->buf[index], data, part_len);
- sha256_transform(sctx->state, sctx->buf);
-
- for (i = part_len; i + 63 < len; i += 64)
- sha256_transform(sctx->state, &data[i]);
- index = 0;
- } else {
- i = 0;
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial, done;
+ const u8 *src;
+
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) > 63) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buf + partial, data, done + 64);
+ src = sctx->buf;
+ }
+
+ do {
+ sha256_transform(sctx->state, src);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
+
+ partial = 0;
}
-
- /* Buffer remaining input */
- memcpy(&sctx->buf[index], &data[i], len-i);
+ memcpy(sctx->buf + partial, src, len - done);
return 0;
}
static int sha256_final(struct shash_desc *desc, u8 *out)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
- __be32 bits[2];
+ __be64 bits;
unsigned int index, pad_len;
int i;
static const u8 padding[64] = { 0x80, };
/* Save number of bits */
- bits[1] = cpu_to_be32(sctx->count[0]);
- bits[0] = cpu_to_be32(sctx->count[1]);
+ bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64. */
- index = (sctx->count[0] >> 3) & 0x3f;
+ index = sctx->count & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
sha256_update(desc, padding, pad_len);
/* Append length (before padding) */
- sha256_update(desc, (const u8 *)bits, sizeof(bits));
+ sha256_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
@@ -331,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
return 0;
}
+static int sha256_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
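/*
 * Illustrative sketch, not part of the patch: with export()/import() wired
 * up, a caller can checkpoint a partial SHA-256 computation and resume it
 * later.  'desc' is assumed to be a caller-provided shash descriptor for a
 * "sha256" tfm with descsize bytes of context behind it.
 */
static int sha256_checkpoint_example(struct shash_desc *desc,
				     const u8 *part1, unsigned int len1,
				     const u8 *part2, unsigned int len2,
				     u8 *digest)
{
	struct sha256_state saved;	/* statesize == sizeof(struct sha256_state) */
	int err;

	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, part1, len1) ?:
	      crypto_shash_export(desc, &saved);	/* snapshot partial state */
	if (err)
		return err;

	/* ...possibly much later, even on a different descriptor... */
	return crypto_shash_import(desc, &saved) ?:	/* restore and continue */
	       crypto_shash_finup(desc, part2, len2, digest);
}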
+
static struct shash_alg sha256 = {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init,
.update = sha256_update,
.final = sha256_final,
- .descsize = sizeof(struct sha256_ctx),
+ .export = sha256_export,
+ .import = sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
@@ -351,7 +359,7 @@ static struct shash_alg sha224 = {
.init = sha224_init,
.update = sha256_update,
.final = sha224_final,
- .descsize = sizeof(struct sha256_ctx),
+ .descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-generic",
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 3bea38d1224..9ed9f60316e 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -21,12 +21,6 @@
#include <linux/percpu.h>
#include <asm/byteorder.h>
-struct sha512_ctx {
- u64 state[8];
- u32 count[4];
- u8 buf[128];
-};
-
static DEFINE_PER_CPU(u64[80], msg_schedule);
static inline u64 Ch(u64 x, u64 y, u64 z)
@@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input)
static int
sha512_init(struct shash_desc *desc)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA512_H0;
sctx->state[1] = SHA512_H1;
sctx->state[2] = SHA512_H2;
@@ -150,7 +144,7 @@ sha512_init(struct shash_desc *desc)
sctx->state[5] = SHA512_H5;
sctx->state[6] = SHA512_H6;
sctx->state[7] = SHA512_H7;
- sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
+ sctx->count[0] = sctx->count[1] = 0;
return 0;
}
@@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc)
static int
sha384_init(struct shash_desc *desc)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA384_H0;
sctx->state[1] = SHA384_H1;
sctx->state[2] = SHA384_H2;
@@ -167,7 +161,7 @@ sha384_init(struct shash_desc *desc)
sctx->state[5] = SHA384_H5;
sctx->state[6] = SHA384_H6;
sctx->state[7] = SHA384_H7;
- sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
+ sctx->count[0] = sctx->count[1] = 0;
return 0;
}
@@ -175,20 +169,16 @@ sha384_init(struct shash_desc *desc)
static int
sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, part_len;
/* Compute number of bytes mod 128 */
- index = (unsigned int)((sctx->count[0] >> 3) & 0x7F);
-
- /* Update number of bits */
- if ((sctx->count[0] += (len << 3)) < (len << 3)) {
- if ((sctx->count[1] += 1) < 1)
- if ((sctx->count[2] += 1) < 1)
- sctx->count[3]++;
- sctx->count[1] += (len >> 29);
- }
+ index = sctx->count[0] & 0x7f;
+
+ /* Update number of bytes */
+ if (!(sctx->count[0] += len))
+ sctx->count[1]++;
part_len = 128 - index;
@@ -214,21 +204,19 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
static int
sha512_final(struct shash_desc *desc, u8 *hash)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
static u8 padding[128] = { 0x80, };
__be64 *dst = (__be64 *)hash;
- __be32 bits[4];
+ __be64 bits[2];
unsigned int index, pad_len;
int i;
/* Save number of bits */
- bits[3] = cpu_to_be32(sctx->count[0]);
- bits[2] = cpu_to_be32(sctx->count[1]);
- bits[1] = cpu_to_be32(sctx->count[2]);
- bits[0] = cpu_to_be32(sctx->count[3]);
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
/* Pad out to 112 mod 128. */
- index = (sctx->count[0] >> 3) & 0x7f;
+ index = sctx->count[0] & 0x7f;
pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
sha512_update(desc, padding, pad_len);
@@ -240,7 +228,7 @@ sha512_final(struct shash_desc *desc, u8 *hash)
dst[i] = cpu_to_be64(sctx->state[i]);
/* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(struct sha512_ctx));
+ memset(sctx, 0, sizeof(struct sha512_state));
return 0;
}
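/*
 * Illustrative note, not part of the patch: count[] now holds a 128-bit
 * *byte* count, so the 128-bit bit count is simply (count << 3).  The low
 * word of that shift is count[0] << 3; the high word is count[1] << 3 plus
 * the three bits shifted out of count[0], i.e. count[0] >> 61 -- exactly
 * what the two cpu_to_be64() lines above compute.  For example,
 * count[0] = 1 << 61 bytes (count[1] = 0) encodes as bits[1] = 0 and
 * bits[0] = 1, i.e. 2^64 bits.
 */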
@@ -262,7 +250,7 @@ static struct shash_alg sha512 = {
.init = sha512_init,
.update = sha512_update,
.final = sha512_final,
- .descsize = sizeof(struct sha512_ctx),
+ .descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
@@ -276,7 +264,7 @@ static struct shash_alg sha384 = {
.init = sha384_init,
.update = sha512_update,
.final = sha384_final,
- .descsize = sizeof(struct sha512_ctx),
+ .descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
diff --git a/crypto/shash.c b/crypto/shash.c
index 2ccc8b0076c..91f7b9d8388 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -22,6 +22,12 @@
static const struct crypto_type crypto_shash_type;
+static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return -ENOSYS;
+}
+
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -39,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
err = shash->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ kzfree(buffer);
return err;
}
@@ -50,9 +55,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (!shash->setkey)
- return -ENOSYS;
-
if ((unsigned long)key & alignmask)
return shash_setkey_unaligned(tfm, key, keylen);
@@ -74,15 +76,19 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned long alignmask = crypto_shash_alignmask(tfm);
unsigned int unaligned_len = alignmask + 1 -
((unsigned long)data & alignmask);
- u8 buf[shash_align_buffer_size(unaligned_len, alignmask)]
+ u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
__attribute__ ((aligned));
+ u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
+ int err;
if (unaligned_len > len)
unaligned_len = len;
memcpy(buf, data, unaligned_len);
+ err = shash->update(desc, buf, unaligned_len);
+ memset(buf, 0, unaligned_len);
- return shash->update(desc, buf, unaligned_len) ?:
+ return err ?:
shash->update(desc, data + unaligned_len, len - unaligned_len);
}
@@ -106,12 +112,19 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
unsigned long alignmask = crypto_shash_alignmask(tfm);
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned int ds = crypto_shash_digestsize(tfm);
- u8 buf[shash_align_buffer_size(ds, alignmask)]
+ u8 ubuf[shash_align_buffer_size(ds, alignmask)]
__attribute__ ((aligned));
+ u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
err = shash->final(desc, buf);
+ if (err)
+ goto out;
+
memcpy(out, buf, ds);
+
+out:
+ memset(buf, 0, ds);
return err;
}
@@ -142,8 +155,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (((unsigned long)data | (unsigned long)out) & alignmask ||
- !shash->finup)
+ if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_finup_unaligned(desc, data, len, out);
return shash->finup(desc, data, len, out);
@@ -154,8 +166,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return crypto_shash_init(desc) ?:
- crypto_shash_update(desc, data, len) ?:
- crypto_shash_final(desc, out);
+ crypto_shash_finup(desc, data, len, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -165,27 +176,24 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (((unsigned long)data | (unsigned long)out) & alignmask ||
- !shash->digest)
+ if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);
return shash->digest(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
-int crypto_shash_import(struct shash_desc *desc, const u8 *in)
+static int shash_default_export(struct shash_desc *desc, void *out)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *alg = crypto_shash_alg(tfm);
-
- memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
-
- if (alg->reinit)
- alg->reinit(desc);
+ memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
+ return 0;
+}
+static int shash_default_import(struct shash_desc *desc, const void *in)
+{
+ memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_shash_import);
static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
@@ -206,9 +214,8 @@ static int shash_async_init(struct ahash_request *req)
return crypto_shash_init(desc);
}
-static int shash_async_update(struct ahash_request *req)
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
- struct shash_desc *desc = ahash_request_ctx(req);
struct crypto_hash_walk walk;
int nbytes;
@@ -218,13 +225,51 @@ static int shash_async_update(struct ahash_request *req)
return nbytes;
}
+EXPORT_SYMBOL_GPL(shash_ahash_update);
+
+static int shash_async_update(struct ahash_request *req)
+{
+ return shash_ahash_update(req, ahash_request_ctx(req));
+}
static int shash_async_final(struct ahash_request *req)
{
return crypto_shash_final(ahash_request_ctx(req), req->result);
}
-static int shash_async_digest(struct ahash_request *req)
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
+{
+ struct crypto_hash_walk walk;
+ int nbytes;
+
+ nbytes = crypto_hash_walk_first(req, &walk);
+ if (!nbytes)
+ return crypto_shash_final(desc, req->result);
+
+ do {
+ nbytes = crypto_hash_walk_last(&walk) ?
+ crypto_shash_finup(desc, walk.data, nbytes,
+ req->result) :
+ crypto_shash_update(desc, walk.data, nbytes);
+ nbytes = crypto_hash_walk_done(&walk, nbytes);
+ } while (nbytes > 0);
+
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_finup);
+
+static int shash_async_finup(struct ahash_request *req)
+{
+ struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct shash_desc *desc = ahash_request_ctx(req);
+
+ desc->tfm = *ctx;
+ desc->flags = req->base.flags;
+
+ return shash_ahash_finup(req, desc);
+}
+
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
struct scatterlist *sg = req->src;
unsigned int offset = sg->offset;
@@ -232,34 +277,40 @@ static int shash_async_digest(struct ahash_request *req)
int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
- struct crypto_shash **ctx =
- crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
void *data;
- desc->tfm = *ctx;
- desc->flags = req->base.flags;
-
data = crypto_kmap(sg_page(sg), 0);
err = crypto_shash_digest(desc, data + offset, nbytes,
req->result);
crypto_kunmap(data, 0);
crypto_yield(desc->flags);
- goto out;
- }
+ } else
+ err = crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
- err = shash_async_init(req);
- if (err)
- goto out;
+ return err;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_digest);
- err = shash_async_update(req);
- if (err)
- goto out;
+static int shash_async_digest(struct ahash_request *req)
+{
+ struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct shash_desc *desc = ahash_request_ctx(req);
- err = shash_async_final(req);
+ desc->tfm = *ctx;
+ desc->flags = req->base.flags;
-out:
- return err;
+ return shash_ahash_digest(req, desc);
+}
+
+static int shash_async_export(struct ahash_request *req, void *out)
+{
+ return crypto_shash_export(ahash_request_ctx(req), out);
+}
+
+static int shash_async_import(struct ahash_request *req, const void *in)
+{
+ return crypto_shash_import(ahash_request_ctx(req), in);
}
static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
@@ -269,11 +320,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
crypto_free_shash(*ctx);
}
-static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
{
struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg);
- struct ahash_tfm *crt = &tfm->crt_ahash;
+ struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
@@ -291,11 +342,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->init = shash_async_init;
crt->update = shash_async_update;
- crt->final = shash_async_final;
+ crt->final = shash_async_final;
+ crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
- crt->setkey = shash_async_setkey;
- crt->digestsize = alg->digestsize;
+ if (alg->setkey)
+ crt->setkey = shash_async_setkey;
+ if (alg->export)
+ crt->export = shash_async_export;
+ if (alg->import)
+ crt->import = shash_async_import;
+
crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
return 0;
@@ -304,14 +361,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
unsigned int keylen)
{
- struct shash_desc *desc = crypto_hash_ctx(tfm);
+ struct shash_desc **descp = crypto_hash_ctx(tfm);
+ struct shash_desc *desc = *descp;
return crypto_shash_setkey(desc->tfm, key, keylen);
}
static int shash_compat_init(struct hash_desc *hdesc)
{
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
desc->flags = hdesc->flags;
@@ -321,7 +380,8 @@ static int shash_compat_init(struct hash_desc *hdesc)
static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
unsigned int len)
{
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
struct crypto_hash_walk walk;
int nbytes;
@@ -334,7 +394,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
{
- return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+
+ return crypto_shash_final(*descp, out);
}
static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
@@ -344,7 +406,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
void *data;
desc->flags = hdesc->flags;
@@ -372,9 +435,11 @@ out:
static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
{
- struct shash_desc *desc= crypto_tfm_ctx(tfm);
+ struct shash_desc **descp = crypto_tfm_ctx(tfm);
+ struct shash_desc *desc = *descp;
crypto_free_shash(desc->tfm);
+ kzfree(desc);
}
static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
@@ -382,8 +447,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
struct hash_tfm *crt = &tfm->crt_hash;
struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg);
- struct shash_desc *desc = crypto_tfm_ctx(tfm);
+ struct shash_desc **descp = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
+ struct shash_desc *desc;
if (!crypto_mod_get(calg))
return -EAGAIN;
@@ -394,6 +460,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
return PTR_ERR(shash);
}
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!desc) {
+ crypto_free_shash(shash);
+ return -ENOMEM;
+ }
+
+ *descp = desc;
desc->tfm = shash;
tfm->exit = crypto_exit_shash_ops_compat;
@@ -413,8 +487,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
switch (mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_HASH_MASK:
return crypto_init_shash_ops_compat(tfm);
- case CRYPTO_ALG_TYPE_AHASH_MASK:
- return crypto_init_shash_ops_async(tfm);
}
return -EINVAL;
@@ -423,26 +495,23 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
- struct shash_alg *salg = __crypto_shash_alg(alg);
-
switch (mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_HASH_MASK:
- return sizeof(struct shash_desc) + salg->descsize;
- case CRYPTO_ALG_TYPE_AHASH_MASK:
- return sizeof(struct crypto_shash *);
+ return sizeof(struct shash_desc *);
}
return 0;
}
-static int crypto_shash_init_tfm(struct crypto_tfm *tfm,
- const struct crypto_type *frontend)
+static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
+ struct crypto_shash *hash = __crypto_shash_cast(tfm);
+
+ hash->descsize = crypto_shash_alg(hash)->descsize;
return 0;
}
-static unsigned int crypto_shash_extsize(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
@@ -456,7 +525,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "type : shash\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", salg->digestsize);
- seq_printf(m, "descsize : %u\n", salg->descsize);
}
static const struct crypto_type crypto_shash_type = {
@@ -480,18 +548,43 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_shash);
-int crypto_register_shash(struct shash_alg *alg)
+static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->base;
if (alg->digestsize > PAGE_SIZE / 8 ||
- alg->descsize > PAGE_SIZE / 8)
+ alg->descsize > PAGE_SIZE / 8 ||
+ alg->statesize > PAGE_SIZE / 8)
return -EINVAL;
base->cra_type = &crypto_shash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+ if (!alg->finup)
+ alg->finup = shash_finup_unaligned;
+ if (!alg->digest)
+ alg->digest = shash_digest_unaligned;
+ if (!alg->export) {
+ alg->export = shash_default_export;
+ alg->import = shash_default_import;
+ alg->statesize = alg->descsize;
+ }
+ if (!alg->setkey)
+ alg->setkey = shash_no_setkey;
+
+ return 0;
+}
+
+int crypto_register_shash(struct shash_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = shash_prepare_alg(alg);
+ if (err)
+ return err;
+
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_shash);
@@ -502,5 +595,44 @@ int crypto_unregister_shash(struct shash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);
+int shash_register_instance(struct crypto_template *tmpl,
+ struct shash_instance *inst)
+{
+ int err;
+
+ err = shash_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, shash_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(shash_register_instance);
+
+void shash_free_instance(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(shash_instance(inst));
+}
+EXPORT_SYMBOL_GPL(shash_free_instance);
+
+int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
+ struct shash_alg *alg,
+ struct crypto_instance *inst)
+{
+ return crypto_init_spawn2(&spawn->base, &alg->base, inst,
+ &crypto_shash_type);
+}
+EXPORT_SYMBOL_GPL(crypto_init_shash_spawn);
+
+struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+{
+ struct crypto_alg *alg;
+
+ alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask);
+ return IS_ERR(alg) ? ERR_CAST(alg) :
+ container_of(alg, struct shash_alg, base);
+}
+EXPORT_SYMBOL_GPL(shash_attr_alg);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous cryptographic hash type");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d59ba5079d1..aa3f84ccc78 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -45,6 +45,9 @@
*/
static unsigned int sec;
+static char *alg = NULL;
+static u32 type;
+static u32 mask;
static int mode;
static char *tvmem[TVMEMSIZE];
@@ -716,6 +719,10 @@ static int do_test(int m)
ret += tcrypt_test("hmac(rmd160)");
break;
+ case 109:
+ ret += tcrypt_test("vmac(aes)");
+ break;
+
case 150:
ret += tcrypt_test("ansi_cprng");
break;
@@ -885,6 +892,12 @@ static int do_test(int m)
return ret;
}
+static int do_alg_test(const char *alg, u32 type, u32 mask)
+{
+ return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
+ 0 : -ENOENT;
+}
+
static int __init tcrypt_mod_init(void)
{
int err = -ENOMEM;
@@ -896,7 +909,11 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
- err = do_test(mode);
+ if (alg)
+ err = do_alg_test(alg, type, mask);
+ else
+ err = do_test(mode);
+
if (err) {
printk(KERN_ERR "tcrypt: one or more tests failed!\n");
goto err_free_tv;
@@ -928,6 +945,9 @@ static void __exit tcrypt_mod_fini(void) { }
module_init(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
+module_param(alg, charp, 0);
+module_param(type, uint, 0);
+module_param(mask, uint, 0);
module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
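Illustrative usage, not part of the patch: with the new alg/type/mask module
parameters, tcrypt can presumably be pointed at a single algorithm instead of
a numeric test mode, e.g. modprobe tcrypt alg="vmac(aes)".  do_alg_test() then
looks the name up via crypto_has_alg(), which instantiates the algorithm and
thereby runs its testmgr self-test, returning -ENOENT if it cannot be built.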
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e9e9d84293b..6d5b746637b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -190,10 +190,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
hash_buff = xbuf[0];
- ret = -EINVAL;
- if (WARN_ON(template[i].psize > PAGE_SIZE))
- goto out;
-
memcpy(hash_buff, template[i].plaintext, template[i].psize);
sg_init_one(&sg[0], hash_buff, template[i].psize);
@@ -2252,6 +2248,15 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "vmac(aes)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = {
+ .vecs = aes_vmac128_tv_template,
+ .count = VMAC_AES_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "wp256",
.test = alg_test_hash,
.suite = {
@@ -2348,6 +2353,7 @@ static int alg_find_test(const char *alg)
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
int i;
+ int j;
int rc;
if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
@@ -2369,14 +2375,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
}
i = alg_find_test(alg);
- if (i < 0)
+ j = alg_find_test(driver);
+ if (i < 0 && j < 0)
goto notest;
- if (fips_enabled && !alg_test_descs[i].fips_allowed)
+ if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
+ (j >= 0 && !alg_test_descs[j].fips_allowed)))
goto non_fips_alg;
- rc = alg_test_descs[i].test(alg_test_descs + i, driver,
- type, mask);
+ rc = 0;
+ if (i >= 0)
+ rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
+ type, mask);
+ if (j >= 0)
+ rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
+ type, mask);
+
test_done:
if (fips_enabled && rc)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 69316228fc1..9963b18983a 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1654,6 +1654,22 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
+#define VMAC_AES_TEST_VECTORS 1
+static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
+ '\x02', '\x03', '\x02', '\x02',
+ '\x02', '\x04', '\x01', '\x07',
+ '\x04', '\x01', '\x04', '\x03',};
+static struct hash_testvec aes_vmac128_tv_template[] = {
+ {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string,
+ .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
+ .psize = 128,
+ .ksize = 16,
+ },
+};
+
/*
* SHA384 HMAC test vectors from RFC4231
*/
diff --git a/crypto/vmac.c b/crypto/vmac.c
new file mode 100644
index 00000000000..0a9468e575d
--- /dev/null
+++ b/crypto/vmac.c
@@ -0,0 +1,678 @@
+/*
+ * Modified to interface to the Linux kernel
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+/* --------------------------------------------------------------------------
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is hereby placed in the public domain.
+ * The authors offer no warranty. Use at your own risk.
+ * Please send bug reports to the authors.
+ * Last modified: 17 APR 08, 1700 PDT
+ * ----------------------------------------------------------------------- */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <asm/byteorder.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/vmac.h>
+#include <crypto/internal/hash.h>
+
+/*
+ * Constants and masks
+ */
+#define UINT64_C(x) x##ULL
+const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
+const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
+const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
+const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
+const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
+
+#ifdef __LITTLE_ENDIAN
+#define INDEX_HIGH 1
+#define INDEX_LOW 0
+#else
+#define INDEX_HIGH 0
+#define INDEX_LOW 1
+#endif
+
+/*
+ * The following routines are used in this implementation. They are
+ * written via macros to simulate zero-overhead call-by-reference.
+ *
+ * MUL64: 64x64->128-bit multiplication
+ * PMUL64: assumes top bits cleared on inputs
+ * ADD128: 128x128->128-bit addition
+ */
+
+#define ADD128(rh, rl, ih, il) \
+ do { \
+ u64 _il = (il); \
+ (rl) += (_il); \
+ if ((rl) < (_il)) \
+ (rh)++; \
+ (rh) += (ih); \
+ } while (0)
+
+#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
+
+#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
+ do { \
+ u64 _i1 = (i1), _i2 = (i2); \
+ u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
+ rh = MUL32(_i1>>32, _i2>>32); \
+ rl = MUL32(_i1, _i2); \
+ ADD128(rh, rl, (m >> 32), (m << 32)); \
+ } while (0)
+
+#define MUL64(rh, rl, i1, i2) \
+ do { \
+ u64 _i1 = (i1), _i2 = (i2); \
+ u64 m1 = MUL32(_i1, _i2>>32); \
+ u64 m2 = MUL32(_i1>>32, _i2); \
+ rh = MUL32(_i1>>32, _i2>>32); \
+ rl = MUL32(_i1, _i2); \
+ ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
+ ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
+ } while (0)
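/*
 * Illustrative sketch, not part of the patch: MUL64 is the schoolbook split
 * of a 64x64 -> 128-bit multiply into four 32x32 products, with the two
 * cross terms folded in through ADD128 so carries out of the low half
 * propagate into rh; PMUL64 merges the cross terms into one addition, which
 * is only safe when the operands' top bits are clear (as for the mpoly-masked
 * poly key).  A tiny sanity check of the contract:
 */
static inline void mul64_example(void)
{
	u64 rh, rl;

	/* (2^32 + 1) * (2^32 + 2) = 2^64 + 3*2^32 + 2 */
	MUL64(rh, rl, 0x100000001ULL, 0x100000002ULL);
	BUG_ON(rh != 1ULL || rl != 0x300000002ULL);
}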
+
+/*
+ * For highest performance the L1 NH and L2 polynomial hashes should be
+ * carefully implemented to take advantage of one's target architecture.
+ * Here these two hash functions are defined multiple times; once for
+ * 64-bit architectures, once for 32-bit SSE2 architectures, and once
+ * for all remaining (32-bit) architectures.
+ * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
+ * Optionally, nh_vmac_nhbytes can be defined (for multiples of
+ * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
+ * NH computations at once).
+ */
+
+#ifdef CONFIG_64BIT
+
+#define nh_16(mp, kp, nw, rh, rl) \
+ do { \
+ int i; u64 th, tl; \
+ rh = rl = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ } \
+ } while (0)
+
+#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
+ do { \
+ int i; u64 th, tl; \
+ rh1 = rl1 = rh = rl = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ ADD128(rh1, rl1, th, tl); \
+ } \
+ } while (0)
+
+#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
+#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
+ do { \
+ int i; u64 th, tl; \
+ rh = rl = 0; \
+ for (i = 0; i < nw; i += 8) { \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ ADD128(rh, rl, th, tl); \
+ } \
+ } while (0)
+
+#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
+ do { \
+ int i; u64 th, tl; \
+ rh1 = rl1 = rh = rl = 0; \
+ for (i = 0; i < nw; i += 8) { \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
+ le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
+ le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
+ le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
+ ADD128(rh1, rl1, th, tl); \
+ } \
+ } while (0)
+#endif
+
+#define poly_step(ah, al, kh, kl, mh, ml) \
+ do { \
+ u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
+ /* compute ab*cd, put bd into result registers */ \
+ PMUL64(t3h, t3l, al, kh); \
+ PMUL64(t2h, t2l, ah, kl); \
+ PMUL64(t1h, t1l, ah, 2*kh); \
+ PMUL64(ah, al, al, kl); \
+ /* add 2 * ac to result */ \
+ ADD128(ah, al, t1h, t1l); \
+ /* add together ad + bc */ \
+ ADD128(t2h, t2l, t3h, t3l); \
+ /* now (ah,al), (t2l,2*t2h) need summing */ \
+ /* first add the high registers, carrying into t2h */ \
+ ADD128(t2h, ah, z, t2l); \
+ /* double t2h and add top bit of ah */ \
+ t2h = 2 * t2h + (ah >> 63); \
+ ah &= m63; \
+ /* now add the low registers */ \
+ ADD128(ah, al, mh, ml); \
+ ADD128(ah, al, z, t2h); \
+ } while (0)
+
+#else /* ! CONFIG_64BIT */
+
+#ifndef nh_16
+#define nh_16(mp, kp, nw, rh, rl) \
+ do { \
+ u64 t1, t2, m1, m2, t; \
+ int i; \
+ rh = rl = t = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ t1 = le64_to_cpup(mp+i) + kp[i]; \
+ t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
+ m2 = MUL32(t1 >> 32, t2); \
+ m1 = MUL32(t1, t2 >> 32); \
+ ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
+ MUL32(t1, t2)); \
+ rh += (u64)(u32)(m1 >> 32) \
+ + (u32)(m2 >> 32); \
+ t += (u64)(u32)m1 + (u32)m2; \
+ } \
+ ADD128(rh, rl, (t >> 32), (t << 32)); \
+ } while (0)
+#endif
+
+static void poly_step_func(u64 *ahi, u64 *alo,
+ const u64 *kh, const u64 *kl,
+ const u64 *mh, const u64 *ml)
+{
+#define a0 (*(((u32 *)alo)+INDEX_LOW))
+#define a1 (*(((u32 *)alo)+INDEX_HIGH))
+#define a2 (*(((u32 *)ahi)+INDEX_LOW))
+#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
+#define k0 (*(((u32 *)kl)+INDEX_LOW))
+#define k1 (*(((u32 *)kl)+INDEX_HIGH))
+#define k2 (*(((u32 *)kh)+INDEX_LOW))
+#define k3 (*(((u32 *)kh)+INDEX_HIGH))
+
+ u64 p, q, t;
+ u32 t2;
+
+ p = MUL32(a3, k3);
+ p += p;
+ p += *(u64 *)mh;
+ p += MUL32(a0, k2);
+ p += MUL32(a1, k1);
+ p += MUL32(a2, k0);
+ t = (u32)(p);
+ p >>= 32;
+ p += MUL32(a0, k3);
+ p += MUL32(a1, k2);
+ p += MUL32(a2, k1);
+ p += MUL32(a3, k0);
+ t |= ((u64)((u32)p & 0x7fffffff)) << 32;
+ p >>= 31;
+ p += (u64)(((u32 *)ml)[INDEX_LOW]);
+ p += MUL32(a0, k0);
+ q = MUL32(a1, k3);
+ q += MUL32(a2, k2);
+ q += MUL32(a3, k1);
+ q += q;
+ p += q;
+ t2 = (u32)(p);
+ p >>= 32;
+ p += (u64)(((u32 *)ml)[INDEX_HIGH]);
+ p += MUL32(a0, k1);
+ p += MUL32(a1, k0);
+ q = MUL32(a2, k3);
+ q += MUL32(a3, k2);
+ q += q;
+ p += q;
+ *(u64 *)(alo) = (p << 32) | t2;
+ p >>= 32;
+ *(u64 *)(ahi) = p + t;
+
+#undef a0
+#undef a1
+#undef a2
+#undef a3
+#undef k0
+#undef k1
+#undef k2
+#undef k3
+}
+
+#define poly_step(ah, al, kh, kl, mh, ml) \
+ poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
+
+#endif /* end of specialized NH and poly definitions */
+
+/* At least nh_16 is defined. Define the others as needed here. */
+#ifndef nh_16_2
+#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
+ do { \
+ nh_16(mp, kp, nw, rh, rl); \
+ nh_16(mp, ((kp)+2), nw, rh2, rl2); \
+ } while (0)
+#endif
+#ifndef nh_vmac_nhbytes
+#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
+ nh_16(mp, kp, nw, rh, rl)
+#endif
+#ifndef nh_vmac_nhbytes_2
+#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
+ do { \
+ nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
+ nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
+ } while (0)
+#endif
+
+static void vhash_abort(struct vmac_ctx *ctx)
+{
+ ctx->polytmp[0] = ctx->polykey[0] ;
+ ctx->polytmp[1] = ctx->polykey[1] ;
+ ctx->first_block_processed = 0;
+}
+
+static u64 l3hash(u64 p1, u64 p2,
+ u64 k1, u64 k2, u64 len)
+{
+ u64 rh, rl, t, z = 0;
+
+ /* fully reduce (p1,p2)+(len,0) mod p127 */
+ t = p1 >> 63;
+ p1 &= m63;
+ ADD128(p1, p2, len, t);
+ /* At this point, (p1,p2) is at most 2^127+(len<<64) */
+ t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
+ ADD128(p1, p2, z, t);
+ p1 &= m63;
+
+ /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
+ t = p1 + (p2 >> 32);
+ t += (t >> 32);
+ t += (u32)t > 0xfffffffeu;
+ p1 += (t >> 32);
+ p2 += (p1 << 32);
+
+ /* compute (p1+k1)%p64 and (p2+k2)%p64 */
+ p1 += k1;
+ p1 += (0 - (p1 < k1)) & 257;
+ p2 += k2;
+ p2 += (0 - (p2 < k2)) & 257;
+
+ /* compute (p1+k1)*(p2+k2)%p64 */
+ MUL64(rh, rl, p1, p2);
+ t = rh >> 56;
+ ADD128(t, rl, z, rh);
+ rh <<= 8;
+ ADD128(t, rl, z, rh);
+ t += t << 8;
+ rl += t;
+ rl += (0 - (rl < t)) & 257;
+ rl += (0 - (rl > p64-1)) & 257;
+ return rl;
+}
+
+static void vhash_update(const unsigned char *m,
+			unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */
+ struct vmac_ctx *ctx)
+{
+ u64 rh, rl, *mptr;
+ const u64 *kptr = (u64 *)ctx->nhkey;
+ int i;
+ u64 ch, cl;
+ u64 pkh = ctx->polykey[0];
+ u64 pkl = ctx->polykey[1];
+
+ mptr = (u64 *)m;
+ i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
+
+ ch = ctx->polytmp[0];
+ cl = ctx->polytmp[1];
+
+ if (!ctx->first_block_processed) {
+ ctx->first_block_processed = 1;
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ ADD128(ch, cl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ i--;
+ }
+
+ while (i--) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ }
+
+ ctx->polytmp[0] = ch;
+ ctx->polytmp[1] = cl;
+}
+
+static u64 vhash(unsigned char m[], unsigned int mbytes,
+ u64 *tagl, struct vmac_ctx *ctx)
+{
+ u64 rh, rl, *mptr;
+ const u64 *kptr = (u64 *)ctx->nhkey;
+ int i, remaining;
+ u64 ch, cl;
+ u64 pkh = ctx->polykey[0];
+ u64 pkl = ctx->polykey[1];
+
+ mptr = (u64 *)m;
+ i = mbytes / VMAC_NHBYTES;
+ remaining = mbytes % VMAC_NHBYTES;
+
+ if (ctx->first_block_processed) {
+ ch = ctx->polytmp[0];
+ cl = ctx->polytmp[1];
+ } else if (i) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
+ ch &= m62;
+ ADD128(ch, cl, pkh, pkl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ i--;
+ } else if (remaining) {
+ nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
+ ch &= m62;
+ ADD128(ch, cl, pkh, pkl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ goto do_l3;
+ } else {/* Empty String */
+ ch = pkh; cl = pkl;
+ goto do_l3;
+ }
+
+ while (i--) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ }
+ if (remaining) {
+ nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ }
+
+do_l3:
+ vhash_abort(ctx);
+ remaining *= 8;
+ return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
+}
+
+static u64 vmac(unsigned char m[], unsigned int mbytes,
+ unsigned char n[16], u64 *tagl,
+ struct vmac_ctx_t *ctx)
+{
+ u64 *in_n, *out_p;
+ u64 p, h;
+ int i;
+
+ in_n = ctx->__vmac_ctx.cached_nonce;
+ out_p = ctx->__vmac_ctx.cached_aes;
+
+ i = n[15] & 1;
+ if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
+ in_n[0] = *(u64 *)(n);
+ in_n[1] = *(u64 *)(n+8);
+ ((unsigned char *)in_n)[15] &= 0xFE;
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out_p, (unsigned char *)in_n);
+
+ ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
+ }
+ p = be64_to_cpup(out_p + i);
+ h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
+ return p + h;
+}
+
+static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
+{
+ u64 in[2] = {0}, out[2];
+ unsigned i;
+ int err = 0;
+
+ err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+ if (err)
+ return err;
+
+ /* Fill nh key */
+ ((unsigned char *)in)[0] = 0x80;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
+ ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
+ ((unsigned char *)in)[15] += 1;
+ }
+
+ /* Fill poly key */
+ ((unsigned char *)in)[0] = 0xC0;
+ in[1] = 0;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.polytmp[i] =
+ ctx->__vmac_ctx.polykey[i] =
+ be64_to_cpup(out) & mpoly;
+ ctx->__vmac_ctx.polytmp[i+1] =
+ ctx->__vmac_ctx.polykey[i+1] =
+ be64_to_cpup(out+1) & mpoly;
+ ((unsigned char *)in)[15] += 1;
+ }
+
+ /* Fill ip key */
+ ((unsigned char *)in)[0] = 0xE0;
+ in[1] = 0;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+ do {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
+ ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
+ ((unsigned char *)in)[15] += 1;
+ } while (ctx->__vmac_ctx.l3key[i] >= p64
+ || ctx->__vmac_ctx.l3key[i+1] >= p64);
+ }
+
+ /* Invalidate nonce/aes cache and reset other elements */
+ ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
+ ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
+ ctx->__vmac_ctx.first_block_processed = 0;
+
+ return err;
+}
+
+static int vmac_setkey(struct crypto_shash *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+
+ if (keylen != VMAC_KEY_LEN) {
+ crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return vmac_set_key((u8 *)key, ctx);
+}
+
+static int vmac_init(struct shash_desc *pdesc)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+
+ memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+ return 0;
+}
+
+static int vmac_update(struct shash_desc *pdesc, const u8 *p,
+ unsigned int len)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+
+ vhash_update(p, len, &ctx->__vmac_ctx);
+
+ return 0;
+}
+
+static int vmac_final(struct shash_desc *pdesc, u8 *out)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+ vmac_t mac;
+ u8 nonce[16] = {};
+
+ mac = vmac(NULL, 0, nonce, NULL, ctx);
+ memcpy(out, &mac, sizeof(vmac_t));
+ memset(&mac, 0, sizeof(vmac_t));
+ memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+ return 0;
+}
+
+static int vmac_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_cipher *cipher;
+ struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void vmac_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+ crypto_free_cipher(ctx->child);
+}
+
+static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct shash_instance *inst;
+ struct crypto_alg *alg;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ if (err)
+ return err;
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
+ CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(alg))
+ return PTR_ERR(alg);
+
+ inst = shash_alloc_instance("vmac", alg);
+ err = PTR_ERR(inst);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ err = crypto_init_spawn(shash_instance_ctx(inst), alg,
+ shash_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ inst->alg.digestsize = sizeof(vmac_t);
+ inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+ inst->alg.base.cra_init = vmac_init_tfm;
+ inst->alg.base.cra_exit = vmac_exit_tfm;
+
+ inst->alg.init = vmac_init;
+ inst->alg.update = vmac_update;
+ inst->alg.final = vmac_final;
+ inst->alg.setkey = vmac_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return err;
+}
+
+static struct crypto_template vmac_tmpl = {
+ .name = "vmac",
+ .create = vmac_create,
+ .free = shash_free_instance,
+ .module = THIS_MODULE,
+};
+
+static int __init vmac_module_init(void)
+{
+ return crypto_register_template(&vmac_tmpl);
+}
+
+static void __exit vmac_module_exit(void)
+{
+ crypto_unregister_template(&vmac_tmpl);
+}
+
+module_init(vmac_module_init);
+module_exit(vmac_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VMAC hash algorithm");
+
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index b63b633e549..bb7b67fba34 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -19,211 +19,142 @@
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
*/
-#include <crypto/scatterwalk.h>
-#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
#include <linux/err.h>
-#include <linux/hardirq.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/rtnetlink.h>
-#include <linux/slab.h>
-#include <linux/scatterlist.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
0x03030303, 0x03030303, 0x03030303, 0x03030303};
+
/*
* +------------------------
* | <parent tfm>
* +------------------------
- * | crypto_xcbc_ctx
+ * | xcbc_tfm_ctx
* +------------------------
- * | odds (block size)
+ * | consts (block size * 2)
* +------------------------
- * | prev (block size)
+ */
+struct xcbc_tfm_ctx {
+ struct crypto_cipher *child;
+ u8 ctx[];
+};
+
+/*
* +------------------------
- * | key (block size)
+ * | <shash desc>
* +------------------------
- * | consts (block size * 3)
+ * | xcbc_desc_ctx
+ * +------------------------
+ * | odds (block size)
+ * +------------------------
+ * | prev (block size)
* +------------------------
*/
-struct crypto_xcbc_ctx {
- struct crypto_cipher *child;
- u8 *odds;
- u8 *prev;
- u8 *key;
- u8 *consts;
- void (*xor)(u8 *a, const u8 *b, unsigned int bs);
- unsigned int keylen;
+struct xcbc_desc_ctx {
unsigned int len;
+ u8 ctx[];
};
-static void xor_128(u8 *a, const u8 *b, unsigned int bs)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
- ((u32 *)a)[2] ^= ((u32 *)b)[2];
- ((u32 *)a)[3] ^= ((u32 *)b)[3];
-}
-
-static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
- struct crypto_xcbc_ctx *ctx)
+static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
+ const u8 *inkey, unsigned int keylen)
{
- int bs = crypto_hash_blocksize(parent);
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
+ int bs = crypto_shash_blocksize(parent);
+ u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
int err = 0;
u8 key1[bs];
- if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
- return err;
+ if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
+ return err;
- crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts);
+ crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs);
+ crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2);
+ crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks);
return crypto_cipher_setkey(ctx->child, key1, bs);
-}
-
-static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
- const u8 *inkey, unsigned int keylen)
-{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
-
- if (keylen != crypto_cipher_blocksize(ctx->child))
- return -EINVAL;
- ctx->keylen = keylen;
- memcpy(ctx->key, inkey, keylen);
- ctx->consts = (u8*)ks;
-
- return _crypto_xcbc_digest_setkey(parent, ctx);
}
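/*
 * Illustrative note, not part of the patch: this follows the XCBC-MAC key
 * derivation of RFC 3566: K1 = E_K(0x01^16), K2 = E_K(0x02^16),
 * K3 = E_K(0x03^16).  K2 and K3 are cached back-to-back in 'consts' (hence
 * "block size * 2" in the tfm context layout above), while K1 is computed
 * into key1 and immediately loaded into the child cipher, so the per-block
 * encryptions in update/final run under K1 without re-keying.
 */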
-static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
+static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
- int bs = crypto_hash_blocksize(pdesc->tfm);
+ unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ int bs = crypto_shash_blocksize(pdesc->tfm);
+ u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs;
ctx->len = 0;
- memset(ctx->odds, 0, bs);
- memset(ctx->prev, 0, bs);
+ memset(prev, 0, bs);
return 0;
}
-static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
- struct scatterlist *sg,
- unsigned int nbytes)
+static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
+ unsigned int len)
{
- struct crypto_hash *parent = pdesc->tfm;
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- struct crypto_cipher *tfm = ctx->child;
- int bs = crypto_hash_blocksize(parent);
-
- for (;;) {
- struct page *pg = sg_page(sg);
- unsigned int offset = sg->offset;
- unsigned int slen = sg->length;
-
- if (unlikely(slen > nbytes))
- slen = nbytes;
-
- nbytes -= slen;
-
- while (slen > 0) {
- unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
- char *p = crypto_kmap(pg, 0) + offset;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(ctx->odds + ctx->len, p, len);
- ctx->len += len;
- slen -= len;
-
- /* checking the rest of the page */
- if (len + offset >= PAGE_SIZE) {
- offset = 0;
- pg++;
- } else
- offset += len;
-
- crypto_kunmap(p, 0);
- crypto_yield(pdesc->flags);
- continue;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- ctx->xor(ctx->prev, ctx->odds, bs);
- crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev);
-
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
- ctx->xor(ctx->prev, p, bs);
- crypto_cipher_encrypt_one(tfm, ctx->prev,
- ctx->prev);
- p += bs;
- len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(ctx->odds, p, len);
- ctx->len = len;
- }
- crypto_kunmap(p, 0);
- crypto_yield(pdesc->flags);
- slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
- offset = 0;
- pg++;
- }
-
- if (!nbytes)
- break;
- sg = scatterwalk_sg_next(sg);
+ struct crypto_shash *parent = pdesc->tfm;
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ struct crypto_cipher *tfm = tctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *prev = odds + bs;
+
+ /* checking the data can fill the block */
+ if ((ctx->len + len) <= bs) {
+ memcpy(odds + ctx->len, p, len);
+ ctx->len += len;
+ return 0;
}
- return 0;
-}
+ /* filling odds with new data and encrypting it */
+ memcpy(odds + ctx->len, p, bs - ctx->len);
+ len -= bs - ctx->len;
+ p += bs - ctx->len;
-static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
- struct scatterlist *sg,
- unsigned int nbytes)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
- return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
-}
+ crypto_xor(prev, odds, bs);
+ crypto_cipher_encrypt_one(tfm, prev, prev);
-static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
-{
- struct crypto_hash *parent = pdesc->tfm;
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- struct crypto_cipher *tfm = ctx->child;
- int bs = crypto_hash_blocksize(parent);
- int err = 0;
-
- if (ctx->len == bs) {
- u8 key2[bs];
+ /* clearing the length */
+ ctx->len = 0;
- if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
- return err;
+ /* encrypting the rest of data */
+ while (len > bs) {
+ crypto_xor(prev, p, bs);
+ crypto_cipher_encrypt_one(tfm, prev, prev);
+ p += bs;
+ len -= bs;
+ }
- crypto_cipher_encrypt_one(tfm, key2,
- (u8 *)(ctx->consts + bs));
+ /* keeping the surplus of blocksize */
+ if (len) {
+ memcpy(odds, p, len);
+ ctx->len = len;
+ }
- ctx->xor(ctx->prev, ctx->odds, bs);
- ctx->xor(ctx->prev, key2, bs);
- _crypto_xcbc_digest_setkey(parent, ctx);
+ return 0;
+}
- crypto_cipher_encrypt_one(tfm, out, ctx->prev);
- } else {
- u8 key3[bs];
+static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ struct crypto_cipher *tfm = tctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);
+ u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *prev = odds + bs;
+ unsigned int offset = 0;
+
+ if (ctx->len != bs) {
unsigned int rlen;
- u8 *p = ctx->odds + ctx->len;
+ u8 *p = odds + ctx->len;
+
*p = 0x80;
p++;
@@ -231,32 +162,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
if (rlen)
memset(p, 0, rlen);
- if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
- return err;
-
- crypto_cipher_encrypt_one(tfm, key3,
- (u8 *)(ctx->consts + bs * 2));
-
- ctx->xor(ctx->prev, ctx->odds, bs);
- ctx->xor(ctx->prev, key3, bs);
-
- _crypto_xcbc_digest_setkey(parent, ctx);
-
- crypto_cipher_encrypt_one(tfm, out, ctx->prev);
+ offset += bs;
}
- return 0;
-}
+ crypto_xor(prev, odds, bs);
+ crypto_xor(prev, consts + offset, bs);
-static int crypto_xcbc_digest(struct hash_desc *pdesc,
- struct scatterlist *sg, unsigned int nbytes, u8 *out)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
+ crypto_cipher_encrypt_one(tfm, out, prev);
- crypto_xcbc_digest_init(pdesc);
- crypto_xcbc_digest_update2(pdesc, sg, nbytes);
- return crypto_xcbc_digest_final(pdesc, out);
+ return 0;
}
static int xcbc_init_tfm(struct crypto_tfm *tfm)
@@ -264,95 +178,95 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm)
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
- int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
+ struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- switch(bs) {
- case 16:
- ctx->xor = xor_128;
- break;
- default:
- return -EINVAL;
- }
-
ctx->child = cipher;
- ctx->odds = (u8*)(ctx+1);
- ctx->prev = ctx->odds + bs;
- ctx->key = ctx->prev + bs;
return 0;
};
static void xcbc_exit_tfm(struct crypto_tfm *tfm)
{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
+ struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
-static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
+static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
+ struct shash_instance *inst;
struct crypto_alg *alg;
+ unsigned long alignmask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
- return ERR_PTR(err);
+ return err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ return PTR_ERR(alg);
switch(alg->cra_blocksize) {
case 16:
break;
default:
- inst = ERR_PTR(-EINVAL);
goto out_put_alg;
}
- inst = crypto_alloc_instance("xcbc", alg);
+ inst = shash_alloc_instance("xcbc", alg);
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_hash_type;
-
- inst->alg.cra_hash.digestsize = alg->cra_blocksize;
- inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
- ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
- inst->alg.cra_init = xcbc_init_tfm;
- inst->alg.cra_exit = xcbc_exit_tfm;
-
- inst->alg.cra_hash.init = crypto_xcbc_digest_init;
- inst->alg.cra_hash.update = crypto_xcbc_digest_update;
- inst->alg.cra_hash.final = crypto_xcbc_digest_final;
- inst->alg.cra_hash.digest = crypto_xcbc_digest;
- inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey;
+ err = crypto_init_spawn(shash_instance_ctx(inst), alg,
+ shash_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_free_inst;
+
+ alignmask = alg->cra_alignmask | 3;
+ inst->alg.base.cra_alignmask = alignmask;
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+
+ inst->alg.digestsize = alg->cra_blocksize;
+ inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx),
+ crypto_tfm_ctx_alignment()) +
+ (alignmask &
+ ~(crypto_tfm_ctx_alignment() - 1)) +
+ alg->cra_blocksize * 2;
+
+ inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
+ alignmask + 1) +
+ alg->cra_blocksize * 2;
+ inst->alg.base.cra_init = xcbc_init_tfm;
+ inst->alg.base.cra_exit = xcbc_exit_tfm;
+
+ inst->alg.init = crypto_xcbc_digest_init;
+ inst->alg.update = crypto_xcbc_digest_update;
+ inst->alg.final = crypto_xcbc_digest_final;
+ inst->alg.setkey = crypto_xcbc_digest_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
-}
-
-static void xcbc_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
+ return err;
}
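/*
 * Illustrative example, not part of the patch, assuming xcbc(aes-generic) on
 * a configuration where crypto_tfm_ctx_alignment() is 8 and aes-generic has
 * cra_alignmask == 0:
 *
 *   alignmask = 0 | 3 = 3
 *   descsize  = ALIGN(sizeof(struct xcbc_desc_ctx), 8)   (len + ctx[] header)
 *             + (3 & ~7) = 0                              (extra padding)
 *             + 16 * 2                                    (odds + prev)
 *
 * i.e. the descriptor reserves the two block-sized buffers plus just enough
 * slack for the PTR_ALIGN(&ctx->ctx[0], alignmask + 1) done in the handlers.
 */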
static struct crypto_template crypto_xcbc_tmpl = {
.name = "xcbc",
- .alloc = xcbc_alloc,
- .free = xcbc_free,
+ .create = xcbc_create,
+ .free = shash_free_instance,
.module = THIS_MODULE,
};