author     Linus Torvalds <torvalds@linux-foundation.org>   2023-06-30 21:27:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-06-30 21:27:13 -0700
commit     5d95ff84e62be914b4a4dabfa814e4096b05b1b0 (patch)
tree       f2d79d562971025b29deab50bb06e3b865f185b3 /crypto
parent     d85a143b69abb4d7544227e26d12c4c7735ab27d (diff)
parent     486bfb05913ac9969a3a71a4dc48f17f31cb162d (diff)
Merge tag 'v6.5-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Add linear akcipher/sig API
- Add tfm cloning (hmac, cmac)
- Add statesize to crypto_ahash
Algorithms:
- Allow only odd e and restrict value in FIPS mode for RSA
- Replace LFSR with SHA3-256 in jitter
- Add interface for gathering of raw entropy in jitter
Drivers:
- Fix race on data_avail and actual data in hwrng/virtio
- Add hash and HMAC support in starfive
- Add RSA algo support in starfive
- Add support for PCI device 0x156E in ccp"
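[Editor's note] The linear akcipher/sig API listed above lets in-kernel users sign and verify with flat buffers instead of scatterlist-backed akcipher requests; the conversion of public_key.c later in this diff shows the pattern. Below is a minimal sketch of verifying a precomputed digest against a public key blob. The algorithm string, key format and wrapper function are illustrative only; the calls themselves (crypto_alloc_sig, crypto_sig_set_pubkey, crypto_sig_verify, crypto_free_sig) are the ones introduced and used by this merge.

	/*
	 * Hedged sketch: verify a PKCS#1 v1.5 RSA signature over a
	 * precomputed SHA-256 digest with the new linear sig API.
	 * example_verify_digest() and the "pkcs1pad(rsa,sha256)" name
	 * are illustrative, not code from this merge.
	 */
	#include <crypto/sig.h>
	#include <linux/err.h>

	static int example_verify_digest(const void *pub_key, unsigned int keylen,
					 const void *s, unsigned int s_len,
					 const void *digest, unsigned int digest_len)
	{
		struct crypto_sig *tfm;
		int ret;

		tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_sig_set_pubkey(tfm, pub_key, keylen);
		if (!ret)
			/* Flat buffers: no scatterlists, no akcipher_request. */
			ret = crypto_sig_verify(tfm, s, s_len, digest, digest_len);

		crypto_free_sig(tfm);
		return ret;
	}

As in the converted public_key_verify_signature() later in this diff, crypto_sig_verify() is expected to return 0 on success and a negative errno on failure.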
* tag 'v6.5-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (85 commits)
crypto: akcipher - Do not copy dst if it is NULL
crypto: sig - Fix verify call
crypto: akcipher - Set request tfm on sync path
crypto: sm2 - Provide sm2_compute_z_digest when sm2 is disabled
hwrng: imx-rngc - switch to DEFINE_SIMPLE_DEV_PM_OPS
hwrng: st - keep clock enabled while hwrng is registered
hwrng: st - support compile-testing
hwrng: imx-rngc - fix the timeout for init and self check
KEYS: asymmetric: Use new crypto interface without scatterlists
KEYS: asymmetric: Move sm2 code into x509_public_key
KEYS: Add forward declaration in asymmetric-parser.h
crypto: sig - Add interface for sign/verify
crypto: akcipher - Add sync interface without SG lists
crypto: cipher - On clone do crypto_mod_get()
crypto: api - Add __crypto_alloc_tfmgfp
crypto: api - Remove crypto_init_ops()
crypto: rsa - allow only odd e and restrict value in FIPS mode
crypto: geniv - Split geniv out of AEAD Kconfig option
crypto: algboss - Add missing dependency on RNG2
crypto: starfive - Add RSA algo support
...
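[Editor's note] Among the commits above, "crypto: akcipher - Add sync interface without SG lists" adds crypto_akcipher_sync_encrypt()/_decrypt(), which software_key_eds_op() in the asymmetric-key code then calls. A rough usage sketch follows, assuming the declarations live in <crypto/akcipher.h>; the algorithm name and wrapper function are illustrative.

	/*
	 * Hedged sketch of the flat-buffer akcipher helper added by this
	 * merge.  example_rsa_encrypt() is a hypothetical caller.
	 */
	#include <crypto/akcipher.h>
	#include <linux/err.h>

	static int example_rsa_encrypt(const void *pub_key, unsigned int keylen,
				       const void *in, unsigned int in_len,
				       void *out, unsigned int out_len)
	{
		struct crypto_akcipher *tfm;
		int ret;

		tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
		if (!ret)
			/*
			 * The helper copies the data through an internal bounce
			 * buffer, builds the akcipher_request and waits for
			 * completion, so the caller never touches scatterlists.
			 */
			ret = crypto_akcipher_sync_encrypt(tfm, in, in_len,
							   out, out_len);

		crypto_free_akcipher(tfm);
		return ret;
	}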
Diffstat (limited to 'crypto')
-rw-r--r--   crypto/Kconfig                            |  60
-rw-r--r--   crypto/Makefile                           |   4
-rw-r--r--   crypto/aegis-neon.h                       |  17
-rw-r--r--   crypto/aegis128-neon-inner.c              |   1
-rw-r--r--   crypto/aegis128-neon.c                    |  12
-rw-r--r--   crypto/ahash.c                            |   9
-rw-r--r--   crypto/akcipher.c                         | 124
-rw-r--r--   crypto/api.c                              |  27
-rw-r--r--   crypto/asymmetric_keys/public_key.c       | 321
-rw-r--r--   crypto/asymmetric_keys/x509_public_key.c  |  29
-rw-r--r--   crypto/cipher.c                           |  28
-rw-r--r--   crypto/cmac.c                             |  36
-rw-r--r--   crypto/hmac.c                             |   1
-rw-r--r--   crypto/internal.h                         |  22
-rw-r--r--   crypto/jitterentropy-kcapi.c              | 190
-rw-r--r--   crypto/jitterentropy-testing.c            | 294
-rw-r--r--   crypto/jitterentropy.c                    | 154
-rw-r--r--   crypto/jitterentropy.h                    |  20
-rw-r--r--   crypto/rsa.c                              |  36
-rw-r--r--   crypto/shash.c                            |  12
-rw-r--r--   crypto/sig.c                              | 157
-rw-r--r--   crypto/sm2.c                              | 106
22 files changed, 1249 insertions, 411 deletions
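[Editor's note] Two of the API items in the pull message, ahash statesize propagation and tfm cloning for hmac/cmac, show up in the ahash.c, cmac.c and cipher.c hunks below. The following sketch shows what cloning a keyed MAC transform looks like from a caller's side; crypto_clone_ahash() is assumed to be declared in <crypto/hash.h>, and the "cmac(aes)" name and wrapper are illustrative.

	/*
	 * Hedged sketch of tfm cloning for a keyed MAC.  example_cmac_clone()
	 * is a hypothetical caller, not code from this merge.
	 */
	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/types.h>

	static int example_cmac_clone(const u8 *key, unsigned int keylen)
	{
		struct crypto_ahash *mac, *copy;
		int ret;

		mac = crypto_alloc_ahash("cmac(aes)", 0, 0);
		if (IS_ERR(mac))
			return PTR_ERR(mac);

		ret = crypto_ahash_setkey(mac, key, keylen);
		if (ret)
			goto out_free_mac;

		/*
		 * The clone inherits the key of the original transform and,
		 * with the crypto_ahash statesize fix in this merge, also its
		 * statesize, so export/import of partial state keeps working
		 * on the copy.
		 */
		copy = crypto_clone_ahash(mac);
		if (IS_ERR(copy)) {
			ret = PTR_ERR(copy);
			goto out_free_mac;
		}

		crypto_free_ahash(copy);
	out_free_mac:
		crypto_free_ahash(mac);
		return ret;
	}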
diff --git a/crypto/Kconfig b/crypto/Kconfig index a0e080d5f6ae..650b1b3620d8 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -71,8 +71,15 @@ config CRYPTO_AEAD config CRYPTO_AEAD2 tristate select CRYPTO_ALGAPI2 - select CRYPTO_NULL2 - select CRYPTO_RNG2 + +config CRYPTO_SIG + tristate + select CRYPTO_SIG2 + select CRYPTO_ALGAPI + +config CRYPTO_SIG2 + tristate + select CRYPTO_ALGAPI2 config CRYPTO_SKCIPHER tristate @@ -82,7 +89,6 @@ config CRYPTO_SKCIPHER config CRYPTO_SKCIPHER2 tristate select CRYPTO_ALGAPI2 - select CRYPTO_RNG2 config CRYPTO_HASH tristate @@ -143,12 +149,14 @@ config CRYPTO_MANAGER config CRYPTO_MANAGER2 def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y) + select CRYPTO_ACOMP2 select CRYPTO_AEAD2 - select CRYPTO_HASH2 - select CRYPTO_SKCIPHER2 select CRYPTO_AKCIPHER2 + select CRYPTO_SIG2 + select CRYPTO_HASH2 select CRYPTO_KPP2 - select CRYPTO_ACOMP2 + select CRYPTO_RNG2 + select CRYPTO_SKCIPHER2 config CRYPTO_USER tristate "Userspace cryptographic algorithm configuration" @@ -833,13 +841,16 @@ config CRYPTO_GCM This is required for IPSec ESP (XFRM_ESP). -config CRYPTO_SEQIV - tristate "Sequence Number IV Generator" +config CRYPTO_GENIV + tristate select CRYPTO_AEAD - select CRYPTO_SKCIPHER select CRYPTO_NULL - select CRYPTO_RNG_DEFAULT select CRYPTO_MANAGER + select CRYPTO_RNG_DEFAULT + +config CRYPTO_SEQIV + tristate "Sequence Number IV Generator" + select CRYPTO_GENIV help Sequence Number IV generator @@ -850,10 +861,7 @@ config CRYPTO_SEQIV config CRYPTO_ECHAINIV tristate "Encrypted Chain IV Generator" - select CRYPTO_AEAD - select CRYPTO_NULL - select CRYPTO_RNG_DEFAULT - select CRYPTO_MANAGER + select CRYPTO_GENIV help Encrypted Chain IV generator @@ -1277,6 +1285,7 @@ endif # if CRYPTO_DRBG_MENU config CRYPTO_JITTERENTROPY tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)" select CRYPTO_RNG + select CRYPTO_SHA3 help CPU Jitter RNG (Random Number Generator) from the Jitterentropy library @@ -1287,6 +1296,26 @@ config CRYPTO_JITTERENTROPY See https://www.chronox.de/jent.html +config CRYPTO_JITTERENTROPY_TESTINTERFACE + bool "CPU Jitter RNG Test Interface" + depends on CRYPTO_JITTERENTROPY + help + The test interface allows a privileged process to capture + the raw unconditioned high resolution time stamp noise that + is collected by the Jitter RNG for statistical analysis. As + this data is used at the same time to generate random bits, + the Jitter RNG operates in an insecure mode as long as the + recording is enabled. This interface therefore is only + intended for testing purposes and is not suitable for + production systems. + + The raw noise data can be obtained using the jent_raw_hires + debugfs file. Using the option + jitterentropy_testing.boot_raw_hires_test=1 the raw noise of + the first 1000 entropy events since boot can be sampled. + + If unsure, select N. + config CRYPTO_KDF800108_CTR tristate select CRYPTO_HMAC @@ -1372,6 +1401,9 @@ config CRYPTO_STATS help Enable the gathering of crypto stats. + Enabling this option reduces the performance of the crypto API. It + should only be enabled when there is actually a use case for it. 
+ This collects data sizes, numbers of requests, and numbers of errors processed by: - AEAD ciphers (encrypt, decrypt) diff --git a/crypto/Makefile b/crypto/Makefile index d0126c915834..953a7e105e58 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -14,7 +14,7 @@ crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y) obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o obj-$(CONFIG_CRYPTO_AEAD2) += aead.o -obj-$(CONFIG_CRYPTO_AEAD2) += geniv.o +obj-$(CONFIG_CRYPTO_GENIV) += geniv.o obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o @@ -25,6 +25,7 @@ crypto_hash-y += shash.o obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o +obj-$(CONFIG_CRYPTO_SIG2) += sig.o obj-$(CONFIG_CRYPTO_KPP2) += kpp.o dh_generic-y := dh.o @@ -171,6 +172,7 @@ CFLAGS_jitterentropy.o = -O0 KASAN_SANITIZE_jitterentropy.o = n UBSAN_SANITIZE_jitterentropy.o = n jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o +obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o diff --git a/crypto/aegis-neon.h b/crypto/aegis-neon.h new file mode 100644 index 000000000000..61e5614b45de --- /dev/null +++ b/crypto/aegis-neon.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef _AEGIS_NEON_H +#define _AEGIS_NEON_H + +void crypto_aegis128_init_neon(void *state, const void *key, const void *iv); +void crypto_aegis128_update_neon(void *state, const void *msg); +void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size); +void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size); +int crypto_aegis128_final_neon(void *state, void *tag_xor, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize); + +#endif diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c index 7de485907d81..b6a52a386b22 100644 --- a/crypto/aegis128-neon-inner.c +++ b/crypto/aegis128-neon-inner.c @@ -16,6 +16,7 @@ #define AEGIS_BLOCK_SIZE 16 #include <stddef.h> +#include "aegis-neon.h" extern int aegis128_have_aes_insn; diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c index a7856915ec85..9ee50549e823 100644 --- a/crypto/aegis128-neon.c +++ b/crypto/aegis128-neon.c @@ -7,17 +7,7 @@ #include <asm/neon.h> #include "aegis.h" - -void crypto_aegis128_init_neon(void *state, const void *key, const void *iv); -void crypto_aegis128_update_neon(void *state, const void *msg); -void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size); -void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size); -int crypto_aegis128_final_neon(void *state, void *tag_xor, - unsigned int assoclen, - unsigned int cryptlen, - unsigned int authsize); +#include "aegis-neon.h" int aegis128_have_aes_insn __ro_after_init; diff --git a/crypto/ahash.c b/crypto/ahash.c index 324651040446..709ef0940799 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -31,12 +31,6 @@ struct ahash_request_priv { void *ubuf[] CRYPTO_MINALIGN_ATTR; }; -static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) -{ - return container_of(crypto_hash_alg_common(hash), struct ahash_alg, - halg); -} - static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int alignmask = walk->alignmask; @@ -432,6 +426,8 @@ static int 
crypto_ahash_init_tfm(struct crypto_tfm *tfm) hash->setkey = ahash_nosetkey; + crypto_ahash_set_statesize(hash, alg->halg.statesize); + if (tfm->__crt_alg->cra_type != &crypto_ahash_type) return crypto_init_shash_ops_async(tfm); @@ -573,6 +569,7 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) nhash->import = hash->import; nhash->setkey = hash->setkey; nhash->reqsize = hash->reqsize; + nhash->statesize = hash->statesize; if (tfm->__crt_alg->cra_type != &crypto_ahash_type) return crypto_clone_shash_ops_async(nhash, hash); diff --git a/crypto/akcipher.c b/crypto/akcipher.c index 7960ceb528c3..52813f0b19e4 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -10,6 +10,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> @@ -17,6 +18,8 @@ #include "internal.h" +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e + static int __maybe_unused crypto_akcipher_report( struct sk_buff *skb, struct crypto_alg *alg) { @@ -105,7 +108,7 @@ static const struct crypto_type crypto_akcipher_type = { .report_stat = crypto_akcipher_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, - .maskset = CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AKCIPHER, .tfmsize = offsetof(struct crypto_akcipher, base), }; @@ -186,5 +189,124 @@ int akcipher_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(akcipher_register_instance); +int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data) +{ + unsigned int reqsize = crypto_akcipher_reqsize(data->tfm); + struct akcipher_request *req; + struct scatterlist *sg; + unsigned int mlen; + unsigned int len; + u8 *buf; + + if (data->dst) + mlen = max(data->slen, data->dlen); + else + mlen = data->slen + data->dlen; + + len = sizeof(*req) + reqsize + mlen; + if (len < mlen) + return -EOVERFLOW; + + req = kzalloc(len, GFP_KERNEL); + if (!req) + return -ENOMEM; + + data->req = req; + akcipher_request_set_tfm(req, data->tfm); + + buf = (u8 *)(req + 1) + reqsize; + data->buf = buf; + memcpy(buf, data->src, data->slen); + + sg = &data->sg; + sg_init_one(sg, buf, mlen); + akcipher_request_set_crypt(req, sg, data->dst ? 
sg : NULL, + data->slen, data->dlen); + + crypto_init_wait(&data->cwait); + akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &data->cwait); + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_akcipher_sync_prep); + +int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err) +{ + err = crypto_wait_req(err, &data->cwait); + if (data->dst) + memcpy(data->dst, data->buf, data->dlen); + data->dlen = data->req->dst_len; + kfree_sensitive(data->req); + return err; +} +EXPORT_SYMBOL_GPL(crypto_akcipher_sync_post); + +int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) +{ + struct crypto_akcipher_sync_data data = { + .tfm = tfm, + .src = src, + .dst = dst, + .slen = slen, + .dlen = dlen, + }; + + return crypto_akcipher_sync_prep(&data) ?: + crypto_akcipher_sync_post(&data, + crypto_akcipher_encrypt(data.req)); +} +EXPORT_SYMBOL_GPL(crypto_akcipher_sync_encrypt); + +int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) +{ + struct crypto_akcipher_sync_data data = { + .tfm = tfm, + .src = src, + .dst = dst, + .slen = slen, + .dlen = dlen, + }; + + return crypto_akcipher_sync_prep(&data) ?: + crypto_akcipher_sync_post(&data, + crypto_akcipher_decrypt(data.req)) ?: + data.dlen; +} +EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt); + +static void crypto_exit_akcipher_ops_sig(struct crypto_tfm *tfm) +{ + struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm); + + crypto_free_akcipher(*ctx); +} + +int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm) +{ + struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_akcipher *akcipher; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + akcipher = crypto_create_tfm(calg, &crypto_akcipher_type); + if (IS_ERR(akcipher)) { + crypto_mod_put(calg); + return PTR_ERR(akcipher); + } + + *ctx = akcipher; + tfm->exit = crypto_exit_akcipher_ops_sig; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_init_akcipher_ops_sig); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic public key cipher type"); diff --git a/crypto/api.c b/crypto/api.c index d375e8cd770d..b9cc0c906efe 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -345,15 +345,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); -static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) -{ - const struct crypto_type *type_obj = tfm->__crt_alg->cra_type; - - if (type_obj) - return type_obj->init(tfm, type, mask); - return 0; -} - static void crypto_exit_ops(struct crypto_tfm *tfm) { const struct crypto_type *type = tfm->__crt_alg->cra_type; @@ -395,25 +386,21 @@ void crypto_shoot_alg(struct crypto_alg *alg) } EXPORT_SYMBOL_GPL(crypto_shoot_alg); -struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, - u32 mask) +struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, + u32 mask, gfp_t gfp) { struct crypto_tfm *tfm = NULL; unsigned int tfm_size; int err = -ENOMEM; tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask); - tfm = kzalloc(tfm_size, GFP_KERNEL); + tfm = kzalloc(tfm_size, gfp); if (tfm == NULL) goto out_err; tfm->__crt_alg = alg; refcount_set(&tfm->refcnt, 1); - err = crypto_init_ops(tfm, type, mask); - if (err) - goto out_free_tfm; - if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) goto cra_init_failed; @@ -421,7 +408,6 @@ 
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, cra_init_failed: crypto_exit_ops(tfm); -out_free_tfm: if (err == -EAGAIN) crypto_shoot_alg(alg); kfree(tfm); @@ -430,6 +416,13 @@ out_err: out: return tfm; } +EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp); + +struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, + u32 mask) +{ + return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL); +} EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); /* diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 50c933f86b21..e787598cb3f7 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -8,18 +8,17 @@ */ #define pr_fmt(fmt) "PKEY: "fmt -#include <linux/module.h> -#include <linux/export.h> +#include <crypto/akcipher.h> +#include <crypto/public_key.h> +#include <crypto/sig.h> +#include <keys/asymmetric-subtype.h> +#include <linux/asn1.h> +#include <linux/err.h> #include <linux/kernel.h> -#include <linux/slab.h> +#include <linux/module.h> #include <linux/seq_file.h> -#include <linux/scatterlist.h> -#include <linux/asn1.h> -#include <keys/asymmetric-subtype.h> -#include <crypto/public_key.h> -#include <crypto/akcipher.h> -#include <crypto/sm2.h> -#include <crypto/sm3_base.h> +#include <linux/slab.h> +#include <linux/string.h> MODULE_DESCRIPTION("In-software asymmetric public-key subtype"); MODULE_AUTHOR("Red Hat, Inc."); @@ -67,10 +66,13 @@ static void public_key_destroy(void *payload0, void *payload3) static int software_key_determine_akcipher(const struct public_key *pkey, const char *encoding, const char *hash_algo, - char alg_name[CRYPTO_MAX_ALG_NAME]) + char alg_name[CRYPTO_MAX_ALG_NAME], bool *sig, + enum kernel_pkey_operation op) { int n; + *sig = true; + if (!encoding) return -EINVAL; @@ -79,14 +81,18 @@ software_key_determine_akcipher(const struct public_key *pkey, * RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2]. */ if (strcmp(encoding, "pkcs1") == 0) { - if (!hash_algo) + if (!hash_algo) { + *sig = false; n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", pkey->pkey_algo); - else + } else { + *sig = op == kernel_pkey_sign || + op == kernel_pkey_verify; n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)", pkey->pkey_algo, hash_algo); + } return n >= CRYPTO_MAX_ALG_NAME ? 
-EINVAL : 0; } if (strcmp(encoding, "raw") != 0) @@ -97,6 +103,7 @@ software_key_determine_akcipher(const struct public_key *pkey, */ if (hash_algo) return -EINVAL; + *sig = false; } else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { if (strcmp(encoding, "x962") != 0) return -EINVAL; @@ -154,37 +161,70 @@ static int software_key_query(const struct kernel_pkey_params *params, struct crypto_akcipher *tfm; struct public_key *pkey = params->key->payload.data[asym_crypto]; char alg_name[CRYPTO_MAX_ALG_NAME]; + struct crypto_sig *sig; u8 *key, *ptr; int ret, len; + bool issig; ret = software_key_determine_akcipher(pkey, params->encoding, - params->hash_algo, alg_name); + params->hash_algo, alg_name, + &issig, kernel_pkey_sign); if (ret < 0) return ret; - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - ret = -ENOMEM; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) - goto error_free_tfm; + return -ENOMEM; + memcpy(key, pkey->key, pkey->keylen); ptr = key + pkey->keylen; ptr = pkey_pack_u32(ptr, pkey->algo); ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); - if (pkey->key_is_private) - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); - else - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); - if (ret < 0) - goto error_free_key; + if (issig) { + sig = crypto_alloc_sig(alg_name, 0, 0); + if (IS_ERR(sig)) + goto error_free_key; + + if (pkey->key_is_private) + ret = crypto_sig_set_privkey(sig, key, pkey->keylen); + else + ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); + if (ret < 0) + goto error_free_tfm; + + len = crypto_sig_maxsize(sig); + + info->supported_ops = KEYCTL_SUPPORTS_VERIFY; + if (pkey->key_is_private) + info->supported_ops |= KEYCTL_SUPPORTS_SIGN; + + if (strcmp(params->encoding, "pkcs1") == 0) { + info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT; + if (pkey->key_is_private) + info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; + } + } else { + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) + goto error_free_key; + + if (pkey->key_is_private) + ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); + else + ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); + if (ret < 0) + goto error_free_tfm; + + len = crypto_akcipher_maxsize(tfm); + + info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT; + if (pkey->key_is_private) + info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; + } - len = crypto_akcipher_maxsize(tfm); info->key_size = len * 8; if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { @@ -210,17 +250,16 @@ static int software_key_query(const struct kernel_pkey_params *params, info->max_enc_size = len; info->max_dec_size = len; - info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT | - KEYCTL_SUPPORTS_VERIFY); - if (pkey->key_is_private) - info->supported_ops |= (KEYCTL_SUPPORTS_DECRYPT | - KEYCTL_SUPPORTS_SIGN); + ret = 0; +error_free_tfm: + if (issig) + crypto_free_sig(sig); + else + crypto_free_akcipher(tfm); error_free_key: kfree(key); -error_free_tfm: - crypto_free_akcipher(tfm); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } @@ -232,34 +271,26 @@ static int software_key_eds_op(struct kernel_pkey_params *params, const void *in, void *out) { const struct public_key *pkey = params->key->payload.data[asym_crypto]; - struct akcipher_request *req; - struct crypto_akcipher *tfm; - struct crypto_wait cwait; - struct scatterlist in_sg, out_sg; char alg_name[CRYPTO_MAX_ALG_NAME]; + struct crypto_akcipher *tfm; + struct crypto_sig 
*sig; char *key, *ptr; + bool issig; + int ksz; int ret; pr_devel("==>%s()\n", __func__); ret = software_key_determine_akcipher(pkey, params->encoding, - params->hash_algo, alg_name); + params->hash_algo, alg_name, + &issig, params->op); if (ret < 0) return ret; - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - ret = -ENOMEM; - req = akcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) - goto error_free_tfm; - key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) - goto error_free_req; + return -ENOMEM; memcpy(key, pkey->key, pkey->keylen); ptr = key + pkey->keylen; @@ -267,123 +298,84 @@ static int software_key_eds_op(struct kernel_pkey_params *params, ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); - if (pkey->key_is_private) - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); - else - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); - if (ret) - goto error_free_key; + if (issig) { + sig = crypto_alloc_sig(alg_name, 0, 0); + if (IS_ERR(sig)) + goto error_free_key; + + if (pkey->key_is_private) + ret = crypto_sig_set_privkey(sig, key, pkey->keylen); + else + ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); + if (ret) + goto error_free_tfm; + + ksz = crypto_sig_maxsize(sig); + } else { + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) + goto error_free_key; + + if (pkey->key_is_private) + ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); + else + ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); + if (ret) + goto error_free_tfm; + + ksz = crypto_akcipher_maxsize(tfm); + } - sg_init_one(&in_sg, in, params->in_len); - sg_init_one(&out_sg, out, params->out_len); - akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len, - params->out_len); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, &cwait); + ret = -EINVAL; /* Perform the encryption calculation. 
*/ switch (params->op) { case kernel_pkey_encrypt: - ret = crypto_akcipher_encrypt(req); + if (issig) + break; + ret = crypto_akcipher_sync_encrypt(tfm, in, params->in_len, + out, params->out_len); break; case kernel_pkey_decrypt: - ret = crypto_akcipher_decrypt(req); + if (issig) + break; + ret = crypto_akcipher_sync_decrypt(tfm, in, params->in_len, + out, params->out_len); break; case kernel_pkey_sign: - ret = crypto_akcipher_sign(req); + if (!issig) + break; + ret = crypto_sig_sign(sig, in, params->in_len, + out, params->out_len); break; default: BUG(); } - ret = crypto_wait_req(ret, &cwait); if (ret == 0) - ret = req->dst_len; + ret = ksz; +error_free_tfm: + if (issig) + crypto_free_sig(sig); + else + crypto_free_akcipher(tfm); error_free_key: kfree(key); -error_free_req: - akcipher_request_free(req); -error_free_tfm: - crypto_free_akcipher(tfm); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } -#if IS_REACHABLE(CONFIG_CRYPTO_SM2) -static int cert_sig_digest_update(const struct public_key_signature *sig, - struct crypto_akcipher *tfm_pkey) -{ - struct crypto_shash *tfm; - struct shash_desc *desc; - size_t desc_size; - unsigned char dgst[SM3_DIGEST_SIZE]; - int ret; - - BUG_ON(!sig->data); - - /* SM2 signatures always use the SM3 hash algorithm */ - if (!sig->hash_algo || strcmp(sig->hash_algo, "sm3") != 0) - return -EINVAL; - - ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID, - SM2_DEFAULT_USERID_LEN, dgst); - if (ret) - return ret; - - tfm = crypto_alloc_shash(sig->hash_algo, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); - desc = kzalloc(desc_size, GFP_KERNEL); - if (!desc) { - ret = -ENOMEM; - goto error_free_tfm; - } - - desc->tfm = tfm; - - ret = crypto_shash_init(desc); - if (ret < 0) - goto error_free_desc; - - ret = crypto_shash_update(desc, dgst, SM3_DIGEST_SIZE); - if (ret < 0) - goto error_free_desc; - - ret = crypto_shash_finup(desc, sig->data, sig->data_size, sig->digest); - -error_free_desc: - kfree(desc); -error_free_tfm: - crypto_free_shash(tfm); - return ret; -} -#else -static inline int cert_sig_digest_update( - const struct public_key_signature *sig, - struct crypto_akcipher *tfm_pkey) -{ - return -ENOTSUPP; -} -#endif /* ! IS_REACHABLE(CONFIG_CRYPTO_SM2) */ - /* * Verify a signature using a public key. 
*/ int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig) { - struct crypto_wait cwait; - struct crypto_akcipher *tfm; - struct akcipher_request *req; - struct scatterlist src_sg; char alg_name[CRYPTO_MAX_ALG_NAME]; - char *buf, *ptr; - size_t buf_len; + struct crypto_sig *tfm; + char *key, *ptr; + bool issig; int ret; pr_devel("==>%s()\n", __func__); @@ -408,63 +400,40 @@ int public_key_verify_signature(const struct public_key *pkey, } ret = software_key_determine_akcipher(pkey, sig->encoding, - sig->hash_algo, alg_name); + sig->hash_algo, alg_name, + &issig, kernel_pkey_verify); if (ret < 0) return ret; - tfm = crypto_alloc_akcipher(alg_name, 0, 0); + tfm = crypto_alloc_sig(alg_name, 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); - ret = -ENOMEM; - req = akcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) + key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, + GFP_KERNEL); + if (!key) goto error_free_tfm; - buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, - sig->s_size + sig->digest_size); - - buf = kmalloc(buf_len, GFP_KERNEL); - if (!buf) - goto error_free_req; - - memcpy(buf, pkey->key, pkey->keylen); - ptr = buf + pkey->keylen; + memcpy(key, pkey->key, pkey->keylen); + ptr = key + pkey->keylen; ptr = pkey_pack_u32(ptr, pkey->algo); ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); if (pkey->key_is_private) - ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen); + ret = crypto_sig_set_privkey(tfm, key, pkey->keylen); else - ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen); + ret = crypto_sig_set_pubkey(tfm, key, pkey->keylen); if (ret) - goto error_free_buf; + goto error_free_key; - if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) { - ret = cert_sig_digest_update(sig, tfm); - if (ret) - goto error_free_buf; - } + ret = crypto_sig_verify(tfm, sig->s, sig->s_size, + sig->digest, sig->digest_size); - memcpy(buf, sig->s, sig->s_size); - memcpy(buf + sig->s_size, sig->digest, sig->digest_size); - - sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size); - akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size, - sig->digest_size); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, &cwait); - ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); - -error_free_buf: - kfree(buf); -error_free_req: - akcipher_request_free(req); +error_free_key: + kfree(key); error_free_tfm: - crypto_free_akcipher(tfm); + crypto_free_sig(tfm); pr_devel("<==%s() = %d\n", __func__, ret); if (WARN_ON_ONCE(ret > 0)) ret = -EINVAL; diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 0b4943a4592b..6fdfc82e23a8 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -6,13 +6,15 @@ */ #define pr_fmt(fmt) "X.509: "fmt +#include <crypto/hash.h> +#include <crypto/sm2.h> +#include <keys/asymmetric-parser.h> +#include <keys/asymmetric-subtype.h> +#include <keys/system_keyring.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <keys/asymmetric-subtype.h> -#include <keys/asymmetric-parser.h> -#include <keys/system_keyring.h> -#include <crypto/hash.h> +#include <linux/string.h> #include "asymmetric_keys.h" #include "x509_parser.h" @@ -30,9 +32,6 @@ int x509_get_sig_params(struct x509_certificate *cert) pr_devel("==>%s()\n", __func__); - sig->data = 
cert->tbs; - sig->data_size = cert->tbs_size; - sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL); if (!sig->s) return -ENOMEM; @@ -65,7 +64,21 @@ int x509_get_sig_params(struct x509_certificate *cert) desc->tfm = tfm; - ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest); + if (strcmp(cert->pub->pkey_algo, "sm2") == 0) { + ret = strcmp(sig->hash_algo, "sm3") != 0 ? -EINVAL : + crypto_shash_init(desc) ?: + sm2_compute_z_digest(desc, cert->pub->key, + cert->pub->keylen, sig->digest) ?: + crypto_shash_init(desc) ?: + crypto_shash_update(desc, sig->digest, + sig->digest_size) ?: + crypto_shash_finup(desc, cert->tbs, cert->tbs_size, + sig->digest); + } else { + ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, + sig->digest); + } + if (ret < 0) goto error_2; diff --git a/crypto/cipher.c b/crypto/cipher.c index b47141ed4a9f..47c77a3e5978 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -90,3 +90,31 @@ void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, cipher_crypt_one(tfm, dst, src, false); } EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL); + +struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher) +{ + struct crypto_tfm *tfm = crypto_cipher_tfm(cipher); + struct crypto_alg *alg = tfm->__crt_alg; + struct crypto_cipher *ncipher; + struct crypto_tfm *ntfm; + + if (alg->cra_init) + return ERR_PTR(-ENOSYS); + + if (unlikely(!crypto_mod_get(alg))) + return ERR_PTR(-ESTALE); + + ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC); + if (IS_ERR(ntfm)) { + crypto_mod_put(alg); + return ERR_CAST(ntfm); + } + + ntfm->crt_flags = tfm->crt_flags; + + ncipher = __crypto_cipher_cast(ntfm); + + return ncipher; +} +EXPORT_SYMBOL_GPL(crypto_clone_cipher); diff --git a/crypto/cmac.c b/crypto/cmac.c index f4a5d3bfb376..fce6b0f58e88 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -198,13 +198,14 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) return 0; } -static int cmac_init_tfm(struct crypto_tfm *tfm) +static int cmac_init_tfm(struct crypto_shash *tfm) { + struct shash_instance *inst = shash_alg_instance(tfm); + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); + struct crypto_cipher_spawn *spawn; struct crypto_cipher *cipher; - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); - struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + spawn = shash_instance_ctx(inst); cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); @@ -212,11 +213,26 @@ static int cmac_init_tfm(struct crypto_tfm *tfm) ctx->child = cipher; return 0; -}; +} + +static int cmac_clone_tfm(struct crypto_shash *tfm, struct crypto_shash *otfm) +{ + struct cmac_tfm_ctx *octx = crypto_shash_ctx(otfm); + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); + struct crypto_cipher *cipher; + + cipher = crypto_clone_cipher(octx->child); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; -static void cmac_exit_tfm(struct crypto_tfm *tfm) + return 0; +} + +static void cmac_exit_tfm(struct crypto_shash *tfm) { - struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); crypto_free_cipher(ctx->child); } @@ -274,13 +290,13 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) ~(crypto_tfm_ctx_alignment() - 1)) + alg->cra_blocksize * 2; - inst->alg.base.cra_init = cmac_init_tfm; - inst->alg.base.cra_exit = cmac_exit_tfm; - 
inst->alg.init = crypto_cmac_digest_init; inst->alg.update = crypto_cmac_digest_update; inst->alg.final = crypto_cmac_digest_final; inst->alg.setkey = crypto_cmac_digest_setkey; + inst->alg.init_tfm = cmac_init_tfm; + inst->alg.clone_tfm = cmac_clone_tfm; + inst->alg.exit_tfm = cmac_exit_tfm; inst->free = shash_free_singlespawn_instance; diff --git a/crypto/hmac.c b/crypto/hmac.c index 09a7872b4060..ea93f4c55f25 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -177,6 +177,7 @@ static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src) static void hmac_exit_tfm(struct crypto_shash *parent) { struct hmac_ctx *ctx = hmac_ctx(parent); + crypto_free_shash(ctx->hash); } diff --git a/crypto/internal.h b/crypto/internal.h index 8dd746b1130b..63e59240d5fb 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -18,9 +18,12 @@ #include <linux/numa.h> #include <linux/refcount.h> #include <linux/rwsem.h> +#include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/types.h> +struct akcipher_request; +struct crypto_akcipher; struct crypto_instance; struct crypto_template; @@ -32,6 +35,19 @@ struct crypto_larval { bool test_started; }; +struct crypto_akcipher_sync_data { + struct crypto_akcipher *tfm; + const void *src; + void *dst; + unsigned int slen; + unsigned int dlen; + + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist sg; + u8 *buf; +}; + enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, @@ -102,6 +118,8 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg); void crypto_remove_final(struct list_head *list); void crypto_shoot_alg(struct crypto_alg *alg); +struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, + u32 mask, gfp_t gfp); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); void *crypto_create_tfm_node(struct crypto_alg *alg, @@ -109,6 +127,10 @@ void *crypto_create_tfm_node(struct crypto_alg *alg, void *crypto_clone_tfm(const struct crypto_type *frontend, struct crypto_tfm *otfm); +int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data); +int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err); +int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm); + static inline void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) { diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index b9edfaa51b27..7d1463a1562a 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -2,7 +2,7 @@ * Non-physical true random number generator based on timing jitter -- * Linux Kernel Crypto API specific code * - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -37,6 +37,8 @@ * DAMAGE. 
*/ +#include <crypto/hash.h> +#include <crypto/sha3.h> #include <linux/fips.h> #include <linux/kernel.h> #include <linux/module.h> @@ -46,6 +48,8 @@ #include "jitterentropy.h" +#define JENT_CONDITIONING_HASH "sha3-256-generic" + /*************************************************************************** * Helper function ***************************************************************************/ @@ -60,11 +64,6 @@ void jent_zfree(void *ptr) kfree_sensitive(ptr); } -void jent_memcpy(void *dest, const void *src, unsigned int n) -{ - memcpy(dest, src, n); -} - /* * Obtain a high-resolution time stamp value. The time stamp is used to measure * the execution time of a given code path and its variations. Hence, the time @@ -89,6 +88,92 @@ void jent_get_nstime(__u64 *out) tmp = ktime_get_ns(); *out = tmp; + jent_raw_hires_entropy_store(tmp); +} + +int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, + unsigned int addtl_len, __u64 hash_loop_cnt, + unsigned int stuck) +{ + struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state; + SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm); + u8 intermediary[SHA3_256_DIGEST_SIZE]; + __u64 j = 0; + int ret; + + desc->tfm = hash_state_desc->tfm; + + if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) { + pr_warn_ratelimited("Unexpected digest size\n"); + return -EINVAL; + } + + /* + * This loop fills a buffer which is injected into the entropy pool. + * The main reason for this loop is to execute something over which we + * can perform a timing measurement. The injection of the resulting + * data into the pool is performed to ensure the result is used and + * the compiler cannot optimize the loop away in case the result is not + * used at all. Yet that data is considered "additional information" + * considering the terminology from SP800-90A without any entropy. + * + * Note, it does not matter which or how much data you inject, we are + * interested in one Keccack1600 compression operation performed with + * the crypto_shash_final. + */ + for (j = 0; j < hash_loop_cnt; j++) { + ret = crypto_shash_init(desc) ?: + crypto_shash_update(desc, intermediary, + sizeof(intermediary)) ?: + crypto_shash_finup(desc, addtl, addtl_len, intermediary); + if (ret) + goto err; + } + + /* + * Inject the data from the previous loop into the pool. This data is + * not considered to contain any entropy, but it stirs the pool a bit. + */ + ret = crypto_shash_update(desc, intermediary, sizeof(intermediary)); + if (ret) + goto err; + + /* + * Insert the time stamp into the hash context representing the pool. + * + * If the time stamp is stuck, do not finally insert the value into the + * entropy pool. Although this operation should not do any harm even + * when the time stamp has no entropy, SP800-90B requires that any + * conditioning operation to have an identical amount of input data + * according to section 3.1.5. 
+ */ + if (!stuck) { + ret = crypto_shash_update(hash_state_desc, (u8 *)&time, + sizeof(__u64)); + } + +err: + shash_desc_zero(desc); + memzero_explicit(intermediary, sizeof(intermediary)); + + return ret; +} + +int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len) +{ + struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state; + u8 jent_block[SHA3_256_DIGEST_SIZE]; + /* Obtain data from entropy pool and re-initialize it */ + int ret = crypto_shash_final(hash_state_desc, jent_block) ?: + crypto_shash_init(hash_state_desc) ?: + crypto_shash_update(hash_state_desc, jent_block, + sizeof(jent_block)); + + if (!ret && dst_len) + memcpy(dst, jent_block, dst_len); + + memzero_explicit(jent_block, sizeof(jent_block)); + return ret; } /*************************************************************************** @@ -98,32 +183,82 @@ void jent_get_nstime(__u64 *out) struct jitterentropy { spinlock_t jent_lock; struct rand_data *entropy_collector; + struct crypto_shash *tfm; + struct shash_desc *sdesc; }; -static int jent_kcapi_init(struct crypto_tfm *tfm) +static void jent_kcapi_cleanup(struct crypto_tfm *tfm) { struct jitterentropy *rng = crypto_tfm_ctx(tfm); - int ret = 0; - rng->entropy_collector = jent_entropy_collector_alloc(1, 0); - if (!rng->entropy_collector) - ret = -ENOMEM; + spin_lock(&rng->jent_lock); - spin_lock_init(&rng->jent_lock); - return ret; -} + if (rng->sdesc) { + shash_desc_zero(rng->sdesc); + kfree(rng->sdesc); + } + rng->sdesc = NULL; -static void jent_kcapi_cleanup(struct crypto_tfm *tfm) -{ - struct jitterentropy *rng = crypto_tfm_ctx(tfm); + if (rng->tfm) + crypto_free_shash(rng->tfm); + rng->tfm = NULL; - spin_lock(&rng->jent_lock); if (rng->entropy_collector) jent_entropy_collector_free(rng->entropy_collector); rng->entropy_collector = NULL; spin_unlock(&rng->jent_lock); } +static int jent_kcapi_init(struct crypto_tfm *tfm) +{ + struct jitterentropy *rng = crypto_tfm_ctx(tfm); + struct crypto_shash *hash; + struct shash_desc *sdesc; + int size, ret = 0; + + spin_lock_init(&rng->jent_lock); + + /* + * Use SHA3-256 as conditioner. We allocate only the generic + * implementation as we are not interested in high-performance. The + * execution time of the SHA3 operation is measured and adds to the + * Jitter RNG's unpredictable behavior. If we have a slower hash + * implementation, the execution timing variations are larger. When + * using a fast implementation, we would need to call it more often + * as its variations are lower. 
+ */ + hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); + if (IS_ERR(hash)) { + pr_err("Cannot allocate conditioning digest\n"); + return PTR_ERR(hash); + } + rng->tfm = hash; + + size = sizeof(struct shash_desc) + crypto_shash_descsize(hash); + sdesc = kmalloc(size, GFP_KERNEL); + if (!sdesc) { + ret = -ENOMEM; + goto err; + } + + sdesc->tfm = hash; + crypto_shash_init(sdesc); + rng->sdesc = sdesc; + + rng->entropy_collector = jent_entropy_collector_alloc(1, 0, sdesc); + if (!rng->entropy_collector) { + ret = -ENOMEM; + goto err; + } + + spin_lock_init(&rng->jent_lock); + return 0; + +err: + jent_kcapi_cleanup(tfm); + return ret; +} + static int jent_kcapi_random(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *rdata, unsigned int dlen) @@ -180,20 +315,34 @@ static struct rng_alg jent_alg = { .cra_module = THIS_MODULE, .cra_init = jent_kcapi_init, .cra_exit = jent_kcapi_cleanup, - } }; static int __init jent_mod_init(void) { + SHASH_DESC_ON_STACK(desc, tfm); + struct crypto_shash *tfm; int ret = 0; - ret = jent_entropy_init(); + jent_testing_init(); + + tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); + if (IS_ERR(tfm)) { + jent_testing_exit(); + return PTR_ERR(tfm); + } + + desc->tfm = tfm; + crypto_shash_init(desc); + ret = jent_entropy_init(desc); + shash_desc_zero(desc); + crypto_free_shash(tfm); if (ret) { /* Handle permanent health test error */ if (fips_enabled) panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret); + jent_testing_exit(); pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret); return -EFAULT; } @@ -202,6 +351,7 @@ static int __init jent_mod_init(void) static void __exit jent_mod_exit(void) { + jent_testing_exit(); crypto_unregister_rng(&jent_alg); } diff --git a/crypto/jitterentropy-testing.c b/crypto/jitterentropy-testing.c new file mode 100644 index 000000000000..5cb6a77b8e3b --- /dev/null +++ b/crypto/jitterentropy-testing.c @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Test interface for Jitter RNG. + * + * Copyright (C) 2023, Stephan Mueller <smueller@chronox.de> + */ + +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/uaccess.h> + +#include "jitterentropy.h" + +#define JENT_TEST_RINGBUFFER_SIZE (1<<10) +#define JENT_TEST_RINGBUFFER_MASK (JENT_TEST_RINGBUFFER_SIZE - 1) + +struct jent_testing { + u32 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE]; + u32 rb_reader; + atomic_t rb_writer; + atomic_t jent_testing_enabled; + spinlock_t lock; + wait_queue_head_t read_wait; +}; + +static struct dentry *jent_raw_debugfs_root = NULL; + +/*************************** Generic Data Handling ****************************/ + +/* + * boot variable: + * 0 ==> No boot test, gathering of runtime data allowed + * 1 ==> Boot test enabled and ready for collecting data, gathering runtime + * data is disabled + * 2 ==> Boot test completed and disabled, gathering of runtime data is + * disabled + */ + +static void jent_testing_reset(struct jent_testing *data) +{ + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + data->rb_reader = 0; + atomic_set(&data->rb_writer, 0); + spin_unlock_irqrestore(&data->lock, flags); +} + +static void jent_testing_data_init(struct jent_testing *data, u32 boot) +{ + /* + * The boot time testing implies we have a running test. 
If the + * caller wants to clear it, he has to unset the boot_test flag + * at runtime via sysfs to enable regular runtime testing + */ + if (boot) + return; + + jent_testing_reset(data); + atomic_set(&data->jent_testing_enabled, 1); + pr_warn("Enabling data collection\n"); +} + +static void jent_testing_fini(struct jent_testing *data, u32 boot) +{ + /* If we have boot data, we do not reset yet to allow data to be read */ + if (boot) + return; + + atomic_set(&data->jent_testing_enabled, 0); + jent_testing_reset(data); + pr_warn("Disabling data collection\n"); +} + +static bool jent_testing_store(struct jent_testing *data, u32 value, + u32 *boot) +{ + unsigned long flags; + + if (!atomic_read(&data->jent_testing_enabled) && (*boot != 1)) + return false; + + spin_lock_irqsave(&data->lock, flags); + + /* + * Disable entropy testing for boot time testing after ring buffer + * is filled. + */ + if (*boot) { + if (((u32)atomic_read(&data->rb_writer)) > + JENT_TEST_RINGBUFFER_SIZE) { + *boot = 2; + pr_warn_once("One time data collection test disabled\n"); + spin_unlock_irqrestore(&data->lock, flags); + return false; + } + + if (atomic_read(&data->rb_writer) == 1) + pr_warn("One time data collection test enabled\n"); + } + + data->jent_testing_rb[((u32)atomic_read(&data->rb_writer)) & + JENT_TEST_RINGBUFFER_MASK] = value; + atomic_inc(&data->rb_writer); + + spin_unlock_irqrestore(&data->lock, flags); + + if (wq_has_sleeper(&data->read_wait)) + wake_up_interruptible(&data->read_wait); + + return true; +} + +static bool jent_testing_have_data(struct jent_testing *data) +{ + return ((((u32)atomic_read(&data->rb_writer)) & + JENT_TEST_RINGBUFFER_MASK) != + (data->rb_reader & JENT_TEST_RINGBUFFER_MASK)); +} + +static int jent_testing_reader(struct jent_testing *data, u32 *boot, + u8 *outbuf, u32 outbuflen) +{ + unsigned long flags; + int collected_data = 0; + + jent_testing_data_init(data, *boot); + + while (outbuflen) { + u32 writer = (u32)atomic_read(&data->rb_writer); + + spin_lock_irqsave(&data->lock, flags); + + /* We have no data or reached the writer. */ + if (!writer || (writer == data->rb_reader)) { + + spin_unlock_irqrestore(&data->lock, flags); + + /* + * Now we gathered all boot data, enable regular data + * collection. + */ + if (*boot) { + *boot = 0; + goto out; + } + + wait_event_interruptible(data->read_wait, + jent_testing_have_data(data)); + if (signal_pending(current)) { + collected_data = -ERESTARTSYS; + goto out; + } + + continue; + } + + /* We copy out word-wise */ + if (outbuflen < sizeof(u32)) { + spin_unlock_irqrestore(&data->lock, flags); + goto out; + } + + memcpy(outbuf, &data->jent_testing_rb[data->rb_reader], + sizeof(u32)); + data->rb_reader++; + + spin_unlock_irqrestore(&data->lock, flags); + + outbuf += sizeof(u32); + outbuflen -= sizeof(u32); + collected_data += sizeof(u32); + } + +out: + jent_testing_fini(data, *boot); + return collected_data; +} + +static int jent_testing_extract_user(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos, + int (*reader)(u8 *outbuf, u32 outbuflen)) +{ + u8 *tmp, *tmp_aligned; + int ret = 0, large_request = (nbytes > 256); + + if (!nbytes) + return 0; + + /* + * The intention of this interface is for collecting at least + * 1000 samples due to the SP800-90B requirements. So, we make no + * effort in avoiding allocating more memory that actually needed + * by the user. Hence, we allocate sufficient memory to always hold + * that amount of data. 
+ */ + tmp = kmalloc(JENT_TEST_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp_aligned = PTR_ALIGN(tmp, sizeof(u32)); + + while (nbytes) { + int i; + + if (large_request && need_resched()) { + if (signal_pending(current)) { + if (ret == 0) + ret = -ERESTARTSYS; + break; + } + schedule(); + } + + i = min_t(int, nbytes, JENT_TEST_RINGBUFFER_SIZE); + i = reader(tmp_aligned, i); + if (i <= 0) { + if (i < 0) + ret = i; + break; + } + if (copy_to_user(buf, tmp_aligned, i)) { + ret = -EFAULT; + break; + } + + nbytes -= i; + buf += i; + ret += i; + } + + kfree_sensitive(tmp); + + if (ret > 0) + *ppos += ret; + + return ret; +} + +/************** Raw High-Resolution Timer Entropy Data Handling **************/ + +static u32 boot_raw_hires_test = 0; +module_param(boot_raw_hires_test, uint, 0644); +MODULE_PARM_DESC(boot_raw_hires_test, + "Enable gathering boot time high resolution timer entropy of the first Jitter RNG entropy events"); + +static struct jent_testing jent_raw_hires = { + .rb_reader = 0, + .rb_writer = ATOMIC_INIT(0), + .lock = __SPIN_LOCK_UNLOCKED(jent_raw_hires.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait) +}; + +int jent_raw_hires_entropy_store(__u32 value) +{ + return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test); +} +EXPORT_SYMBOL(jent_raw_hires_entropy_store); + +static int jent_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return jent_testing_reader(&jent_raw_hires, &boot_raw_hires_test, + outbuf, outbuflen); +} + +static ssize_t jent_raw_hires_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return jent_testing_extract_user(file, to, count, ppos, + jent_raw_hires_entropy_reader); +} + +static const struct file_operations jent_raw_hires_fops = { + .owner = THIS_MODULE, + .read = jent_raw_hires_read, +}; + +/******************************* Initialization *******************************/ + +void jent_testing_init(void) +{ + jent_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + + debugfs_create_file_unsafe("jent_raw_hires", 0400, + jent_raw_debugfs_root, NULL, + &jent_raw_hires_fops); +} +EXPORT_SYMBOL(jent_testing_init); + +void jent_testing_exit(void) +{ + debugfs_remove_recursive(jent_raw_debugfs_root); +} +EXPORT_SYMBOL(jent_testing_exit); diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 22f48bf4c6f5..c7d7f2caa779 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -2,7 +2,7 @@ * Non-physical true random number generator based on timing jitter -- * Jitter RNG standalone code. * - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2020 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023 * * Design * ====== @@ -47,7 +47,7 @@ /* * This Jitterentropy RNG is based on the jitterentropy library - * version 2.2.0 provided at https://www.chronox.de/jent.html + * version 3.4.0 provided at https://www.chronox.de/jent.html */ #ifdef __OPTIMIZE__ @@ -57,21 +57,22 @@ typedef unsigned long long __u64; typedef long long __s64; typedef unsigned int __u32; +typedef unsigned char u8; #define NULL ((void *) 0) /* The entropy pool */ struct rand_data { + /* SHA3-256 is used as conditioner */ +#define DATA_SIZE_BITS 256 /* all data values that are vital to maintain the security * of the RNG are marked as SENSITIVE. A user must not * access that information while the RNG executes its loops to * calculate the next random value. 
*/ - __u64 data; /* SENSITIVE Actual random number */ - __u64 old_data; /* SENSITIVE Previous random number */ - __u64 prev_time; /* SENSITIVE Previous time stamp */ -#define DATA_SIZE_BITS ((sizeof(__u64)) * 8) - __u64 last_delta; /* SENSITIVE stuck test */ - __s64 last_delta2; /* SENSITIVE stuck test */ - unsigned int osr; /* Oversample rate */ + void *hash_state; /* SENSITIVE hash state entropy pool */ + __u64 prev_time; /* SENSITIVE Previous time stamp */ + __u64 last_delta; /* SENSITIVE stuck test */ + __s64 last_delta2; /* SENSITIVE stuck test */ + unsigned int osr; /* Oversample rate */ #define JENT_MEMORY_BLOCKS 64 #define JENT_MEMORY_BLOCKSIZE 32 #define JENT_MEMORY_ACCESSLOOPS 128 @@ -117,7 +118,6 @@ struct rand_data { * zero). */ #define JENT_ESTUCK 8 /* Too many stuck results during init. */ #define JENT_EHEALTH 9 /* Health test failed during initialization */ -#define JENT_ERCT 10 /* RCT failed during initialization */ /* * The output n bits can receive more than n bits of min entropy, of course, @@ -302,15 +302,13 @@ static int jent_permanent_health_failure(struct rand_data *ec) * an entropy collection. * * Input: - * @ec entropy collector struct -- may be NULL * @bits is the number of low bits of the timer to consider * @min is the number of bits we shift the timer value to the right at * the end to make sure we have a guaranteed minimum value * * @return Newly calculated loop counter */ -static __u64 jent_loop_shuffle(struct rand_data *ec, - unsigned int bits, unsigned int min) +static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min) { __u64 time = 0; __u64 shuffle = 0; @@ -318,12 +316,7 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, unsigned int mask = (1<<bits) - 1; jent_get_nstime(&time); - /* - * Mix the current state of the random number into the shuffle - * calculation to balance that shuffle a bit more. - */ - if (ec) - time ^= ec->data; + /* * We fold the time value as much as possible to ensure that as many * bits of the time stamp are included as possible. @@ -345,81 +338,32 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, * execution time jitter * * This function injects the individual bits of the time value into the - * entropy pool using an LFSR. + * entropy pool using a hash. * - * The code is deliberately inefficient with respect to the bit shifting - * and shall stay that way. This function is the root cause why the code - * shall be compiled without optimization. This function not only acts as - * folding operation, but this function's execution is used to measure - * the CPU execution time jitter. Any change to the loop in this function - * implies that careful retesting must be done. - * - * @ec [in] entropy collector struct - * @time [in] time stamp to be injected - * @loop_cnt [in] if a value not equal to 0 is set, use the given value as - * number of loops to perform the folding - * @stuck [in] Is the time stamp identified as stuck? + * ec [in] entropy collector + * time [in] time stamp to be injected + * stuck [in] Is the time stamp identified as stuck? 
* * Output: - * updated ec->data - * - * @return Number of loops the folding operation is performed + * updated hash context in the entropy collector or error code */ -static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt, - int stuck) +static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck) { - unsigned int i; - __u64 j = 0; - __u64 new = 0; -#define MAX_FOLD_LOOP_BIT 4 -#define MIN_FOLD_LOOP_BIT 0 - __u64 fold_loop_cnt = - jent_loop_shuffle(ec, MAX_FOLD_LOOP_BIT, MIN_FOLD_LOOP_BIT); - - /* - * testing purposes -- allow test app to set the counter, not - * needed during runtime - */ - if (loop_cnt) - fold_loop_cnt = loop_cnt; - for (j = 0; j < fold_loop_cnt; j++) { - new = ec->data; - for (i = 1; (DATA_SIZE_BITS) >= i; i++) { - __u64 tmp = time << (DATA_SIZE_BITS - i); - - tmp = tmp >> (DATA_SIZE_BITS - 1); - - /* - * Fibonacci LSFR with polynomial of - * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is - * primitive according to - * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf - * (the shift values are the polynomial values minus one - * due to counting bits from 0 to 63). As the current - * position is always the LSB, the polynomial only needs - * to shift data in from the left without wrap. - */ - tmp ^= ((new >> 63) & 1); - tmp ^= ((new >> 60) & 1); - tmp ^= ((new >> 55) & 1); - tmp ^= ((new >> 30) & 1); - tmp ^= ((new >> 27) & 1); - tmp ^= ((new >> 22) & 1); - new <<= 1; - new ^= tmp; - } - } - - /* - * If the time stamp is stuck, do not finally insert the value into - * the entropy pool. Although this operation should not do any harm - * even when the time stamp has no entropy, SP800-90B requires that - * any conditioning operation (SP800-90B considers the LFSR to be a - * conditioning operation) to have an identical amount of input - * data according to section 3.1.5. - */ - if (!stuck) - ec->data = new; +#define SHA3_HASH_LOOP (1<<3) + struct { + int rct_count; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + } addtl = { + ec->rct_count, + ec->apt_observations, + ec->apt_count, + ec->apt_base + }; + + return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl), + SHA3_HASH_LOOP, stuck); } /* @@ -453,7 +397,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) #define MAX_ACC_LOOP_BIT 7 #define MIN_ACC_LOOP_BIT 0 __u64 acc_loop_cnt = - jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); + jent_loop_shuffle(MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); if (NULL == ec || NULL == ec->mem) return; @@ -521,14 +465,15 @@ static int jent_measure_jitter(struct rand_data *ec) stuck = jent_stuck(ec, current_delta); /* Now call the next noise sources which also injects the data */ - jent_lfsr_time(ec, current_delta, 0, stuck); + if (jent_condition_data(ec, current_delta, stuck)) + stuck = 1; return stuck; } /* * Generator of one 64 bit random number - * Function fills rand_data->data + * Function fills rand_data->hash_state * * @ec [in] Reference to entropy collector */ @@ -575,7 +520,7 @@ static void jent_gen_entropy(struct rand_data *ec) * @return 0 when request is fulfilled or an error * * The following error codes can occur: - * -1 entropy_collector is NULL + * -1 entropy_collector is NULL or the generation failed * -2 Intermittent health failure * -3 Permanent health failure */ @@ -605,7 +550,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, * Perform startup health tests and return permanent * error if it fails. 
 			 */
-			if (jent_entropy_init())
+			if (jent_entropy_init(ec->hash_state))
 				return -3;
 
 			return -2;
@@ -615,7 +560,8 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
 			tocopy = (DATA_SIZE_BITS / 8);
 		else
 			tocopy = len;
-		jent_memcpy(p, &ec->data, tocopy);
+		if (jent_read_random_block(ec->hash_state, p, tocopy))
+			return -1;
 
 		len -= tocopy;
 		p += tocopy;
@@ -629,7 +575,8 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
 ***************************************************************************/
 
 struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
-					       unsigned int flags)
+					       unsigned int flags,
+					       void *hash_state)
 {
 	struct rand_data *entropy_collector;
 
@@ -656,6 +603,8 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
 		osr = 1; /* minimum sampling rate is 1 */
 	entropy_collector->osr = osr;
 
+	entropy_collector->hash_state = hash_state;
+
 	/* fill the data pad with non-zero values */
 	jent_gen_entropy(entropy_collector);
 
@@ -669,7 +618,7 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector)
 	jent_zfree(entropy_collector);
 }
 
-int jent_entropy_init(void)
+int jent_entropy_init(void *hash_state)
 {
 	int i;
 	__u64 delta_sum = 0;
@@ -682,6 +631,7 @@ int jent_entropy_init(void)
 
 	/* Required for RCT */
 	ec.osr = 1;
+	ec.hash_state = hash_state;
 
 	/* We could perform statistical tests here, but the problem is
 	 * that we only have a few loop counts to do testing. These
@@ -719,7 +669,7 @@ int jent_entropy_init(void)
 		/* Invoke core entropy collection logic */
 		jent_get_nstime(&time);
 		ec.prev_time = time;
-		jent_lfsr_time(&ec, time, 0, 0);
+		jent_condition_data(&ec, time, 0);
 		jent_get_nstime(&time2);
 
 		/* test whether timer works */
@@ -762,14 +712,12 @@ int jent_entropy_init(void)
 
 			if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
 				jent_apt_reset(&ec, delta & JENT_APT_WORD_MASK);
-				if (jent_health_failure(&ec))
-					return JENT_EHEALTH;
 			}
 		}
 
-		/* Validate RCT */
-		if (jent_rct_failure(&ec))
-			return JENT_ERCT;
+		/* Validate health test result */
+		if (jent_health_failure(&ec))
+			return JENT_EHEALTH;
 
 		/* test whether we have an increasing timer */
 		if (!(time2 > time))
diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h
index 5cc583f6bc6b..4c92176ea2b1 100644
--- a/crypto/jitterentropy.h
+++ b/crypto/jitterentropy.h
@@ -2,14 +2,28 @@
 
 extern void *jent_zalloc(unsigned int len);
 extern void jent_zfree(void *ptr);
-extern void jent_memcpy(void *dest, const void *src, unsigned int n);
 extern void jent_get_nstime(__u64 *out);
+extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
+			  unsigned int addtl_len, __u64 hash_loop_cnt,
+			  unsigned int stuck);
+int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len);
 
 struct rand_data;
-extern int jent_entropy_init(void);
+extern int jent_entropy_init(void *hash_state);
 extern int jent_read_entropy(struct rand_data *ec, unsigned char *data,
 			     unsigned int len);
 
 extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
-						      unsigned int flags);
+						      unsigned int flags,
+						      void *hash_state);
 extern void jent_entropy_collector_free(struct rand_data *entropy_collector);
+
+#ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE
+int jent_raw_hires_entropy_store(__u32 value);
+void jent_testing_init(void);
+void jent_testing_exit(void);
+#else /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
+static inline int jent_raw_hires_entropy_store(__u32 value) { return 0; }
+static inline void jent_testing_init(void) { }
+static inline void jent_testing_exit(void) { }
+#endif /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
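
For orientation only, the sketch below shows how a caller of the interface declared above might thread the new opaque hash_state through the init/alloc/read cycle. It is not the actual jitterentropy-kcapi.c glue; jent_kcapi_alloc_hash_state() and jent_kcapi_free_hash_state() are hypothetical stand-ins for whatever state backs jent_hash_time() and jent_read_random_block() (per the merge description, a SHA3-256 conditioner).

/*
 * Illustrative sketch, not part of the patch: wiring up the new
 * hash_state plumbing declared in crypto/jitterentropy.h.
 */
static int example_jent_setup(void)
{
    struct rand_data *ec;
    unsigned char seed[32];
    void *hash_state;
    int ret;

    hash_state = jent_kcapi_alloc_hash_state();        /* hypothetical helper */
    if (!hash_state)
        return -ENOMEM;

    /* Startup health and timer checks now also exercise the conditioner. */
    ret = jent_entropy_init(hash_state);
    if (ret) {
        /* ret is a positive JENT_* code; map it to an errno as needed. */
        jent_kcapi_free_hash_state(hash_state);        /* hypothetical helper */
        return -EFAULT;
    }

    /* osr = 1, no flags; the collector keeps a reference to hash_state. */
    ec = jent_entropy_collector_alloc(1, 0, hash_state);
    if (!ec) {
        jent_kcapi_free_hash_state(hash_state);        /* hypothetical helper */
        return -ENOMEM;
    }

    /* Returns 0 on success, or -1/-2/-3 as documented in jitterentropy.c. */
    ret = jent_read_entropy(ec, seed, sizeof(seed));

    jent_entropy_collector_free(ec);
    jent_kcapi_free_hash_state(hash_state);            /* hypothetical helper */
    return ret < 0 ? -EAGAIN : 0;
}
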
diff --git a/crypto/rsa.c b/crypto/rsa.c
index c50f2d2a4d06..c79613cdce6e 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -205,6 +205,32 @@ static int rsa_check_key_length(unsigned int len)
 	return -EINVAL;
 }
 
+static int rsa_check_exponent_fips(MPI e)
+{
+	MPI e_max = NULL;
+
+	/* check if odd */
+	if (!mpi_test_bit(e, 0)) {
+		return -EINVAL;
+	}
+
+	/* check if 2^16 < e < 2^256. */
+	if (mpi_cmp_ui(e, 65536) <= 0) {
+		return -EINVAL;
+	}
+
+	e_max = mpi_alloc(0);
+	mpi_set_bit(e_max, 256);
+
+	if (mpi_cmp(e, e_max) >= 0) {
+		mpi_free(e_max);
+		return -EINVAL;
+	}
+
+	mpi_free(e_max);
+	return 0;
+}
+
 static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
 			   unsigned int keylen)
 {
@@ -232,6 +258,11 @@ static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
 		return -EINVAL;
 	}
 
+	if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) {
+		rsa_free_mpi_key(mpi_key);
+		return -EINVAL;
+	}
+
 	return 0;
 
 err:
@@ -290,6 +321,11 @@ static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
 		return -EINVAL;
 	}
 
+	if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) {
+		rsa_free_mpi_key(mpi_key);
+		return -EINVAL;
+	}
+
 	return 0;
 
 err:
diff --git a/crypto/shash.c b/crypto/shash.c
index 717b42df3495..1fadb6b59bdc 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -597,7 +597,7 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
 		return hash;
 	}
 
-	if (!alg->clone_tfm)
+	if (!alg->clone_tfm && (alg->init_tfm || alg->base.cra_init))
 		return ERR_PTR(-ENOSYS);
 
 	nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
@@ -606,10 +606,12 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
 
 	nhash->descsize = hash->descsize;
 
-	err = alg->clone_tfm(nhash, hash);
-	if (err) {
-		crypto_free_shash(nhash);
-		return ERR_PTR(err);
+	if (alg->clone_tfm) {
+		err = alg->clone_tfm(nhash, hash);
+		if (err) {
+			crypto_free_shash(nhash);
+			return ERR_PTR(err);
+		}
 	}
 
 	return nhash;
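
As a quick illustration of the bounds enforced by the new rsa_check_exponent_fips() above (e must be odd and satisfy 2^16 < e < 2^256), the stand-alone snippet below mirrors the same rule for exponents that fit in 64 bits. It is a sketch for intuition only and does not use the kernel MPI helpers.

#include <stdbool.h>
#include <stdint.h>

/* Mirror of the FIPS exponent rule for small (<= 64-bit) exponents. */
static bool fips_exponent_ok(uint64_t e)
{
    if (!(e & 1))       /* must be odd */
        return false;
    if (e <= 65536)     /* must satisfy e > 2^16 */
        return false;
    /* e < 2^256 is trivially true for any 64-bit value. */
    return true;
}

/*
 * fips_exponent_ok(3)     -> false (odd, but not greater than 2^16)
 * fips_exponent_ok(65536) -> false (even and equal to 2^16)
 * fips_exponent_ok(65537) -> true  (the common F4 exponent)
 */
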
diff --git a/crypto/sig.c b/crypto/sig.c
new file mode 100644
index 000000000000..b48c18ec65cd
--- /dev/null
+++ b/crypto/sig.c
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <crypto/akcipher.h>
+#include <crypto/internal/sig.h>
+#include <linux/cryptouser.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <net/netlink.h>
+
+#include "internal.h"
+
+#define CRYPTO_ALG_TYPE_SIG_MASK	0x0000000e
+
+static const struct crypto_type crypto_sig_type;
+
+static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_sig, base);
+}
+
+static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
+{
+	if (tfm->__crt_alg->cra_type != &crypto_sig_type)
+		return crypto_init_akcipher_ops_sig(tfm);
+
+	return 0;
+}
+
+static void __maybe_unused crypto_sig_show(struct seq_file *m,
+					   struct crypto_alg *alg)
+{
+	seq_puts(m, "type         : sig\n");
+}
+
+static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
+					    struct crypto_alg *alg)
+{
+	struct crypto_report_akcipher rsig = {};
+
+	strscpy(rsig.type, "sig", sizeof(rsig.type));
+
+	return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig);
+}
+
+static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb,
+						 struct crypto_alg *alg)
+{
+	struct crypto_stat_akcipher rsig = {};
+
+	strscpy(rsig.type, "sig", sizeof(rsig.type));
+
+	return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig);
+}
+
+static const struct crypto_type crypto_sig_type = {
+	.extsize = crypto_alg_extsize,
+	.init_tfm = crypto_sig_init_tfm,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_sig_show,
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
+	.report = crypto_sig_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+	.report_stat = crypto_sig_report_stat,
+#endif
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_SIG_MASK,
+	.type = CRYPTO_ALG_TYPE_SIG,
+	.tfmsize = offsetof(struct crypto_sig, base),
+};
+
+struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_sig_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_sig);
+
+int crypto_sig_maxsize(struct crypto_sig *tfm)
+{
+	struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+
+	return crypto_akcipher_maxsize(*ctx);
+}
+EXPORT_SYMBOL_GPL(crypto_sig_maxsize);
+
+int crypto_sig_sign(struct crypto_sig *tfm,
+		    const void *src, unsigned int slen,
+		    void *dst, unsigned int dlen)
+{
+	struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+	struct crypto_akcipher_sync_data data = {
+		.tfm = *ctx,
+		.src = src,
+		.dst = dst,
+		.slen = slen,
+		.dlen = dlen,
+	};
+
+	return crypto_akcipher_sync_prep(&data) ?:
+	       crypto_akcipher_sync_post(&data,
+					 crypto_akcipher_sign(data.req));
+}
+EXPORT_SYMBOL_GPL(crypto_sig_sign);
+
+int crypto_sig_verify(struct crypto_sig *tfm,
+		      const void *src, unsigned int slen,
+		      const void *digest, unsigned int dlen)
+{
+	struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+	struct crypto_akcipher_sync_data data = {
+		.tfm = *ctx,
+		.src = src,
+		.slen = slen,
+		.dlen = dlen,
+	};
+	int err;
+
+	err = crypto_akcipher_sync_prep(&data);
+	if (err)
+		return err;
+
+	memcpy(data.buf + slen, digest, dlen);
+
+	return crypto_akcipher_sync_post(&data,
+					 crypto_akcipher_verify(data.req));
+}
+EXPORT_SYMBOL_GPL(crypto_sig_verify);
+
+int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+			  const void *key, unsigned int keylen)
+{
+	struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+
+	return crypto_akcipher_set_pub_key(*ctx, key, keylen);
+}
+EXPORT_SYMBOL_GPL(crypto_sig_set_pubkey);
+
+int crypto_sig_set_privkey(struct crypto_sig *tfm,
+			   const void *key, unsigned int keylen)
+{
+	struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+
+	return crypto_akcipher_set_priv_key(*ctx, key, keylen);
+}
+EXPORT_SYMBOL_GPL(crypto_sig_set_privkey);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Public Key Signature Algorithms");
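
The sig type introduced above is a thin synchronous wrapper over akcipher, so a caller can verify a signature with plain buffers instead of scatterlists. The sketch below shows the intended calling sequence; the algorithm name and key encoding are placeholders, and crypto_free_sig() is assumed to come from the companion <crypto/sig.h> header added by this series rather than from this file.

/* Illustrative sketch, not part of the patch: verifying with the sig API. */
static int example_sig_verify(const u8 *pubkey, unsigned int keylen,
			      const u8 *sig, unsigned int siglen,
			      const u8 *digest, unsigned int digestlen)
{
    struct crypto_sig *tfm;
    int err;

    /* Algorithm name is a placeholder for whatever the caller needs. */
    tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    /* Key format is whatever the underlying akcipher's setkey expects. */
    err = crypto_sig_set_pubkey(tfm, pubkey, keylen);
    if (err)
        goto out;

    /* No scatterlists: plain buffers in, 0 or a negative error out. */
    err = crypto_sig_verify(tfm, sig, siglen, digest, digestlen);
out:
    crypto_free_sig(tfm);	/* assumed from <crypto/sig.h> */
    return err;
}
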
diff --git a/crypto/sm2.c b/crypto/sm2.c
index ed9307dac3d1..285b3cb7c0bc 100644
--- a/crypto/sm2.c
+++ b/crypto/sm2.c
@@ -13,11 +13,14 @@
 #include <crypto/internal/akcipher.h>
 #include <crypto/akcipher.h>
 #include <crypto/hash.h>
-#include <crypto/sm3.h>
 #include <crypto/rng.h>
 #include <crypto/sm2.h>
 #include "sm2signature.asn1.h"
 
+/* The default user id as specified in GM/T 0009-2012 */
+#define SM2_DEFAULT_USERID "1234567812345678"
+#define SM2_DEFAULT_USERID_LEN 16
+
 #define MPI_NBYTES(m)   ((mpi_get_nbits(m) + 7) / 8)
 
 struct ecc_domain_parms {
@@ -60,6 +63,9 @@ static const struct ecc_domain_parms sm2_ecp = {
 	.h = 1
 };
 
+static int __sm2_set_pub_key(struct mpi_ec_ctx *ec,
+			     const void *key, unsigned int keylen);
+
 static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
 {
 	const struct ecc_domain_parms *ecp = &sm2_ecp;
@@ -213,12 +219,13 @@ int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
 	return 0;
 }
 
-static int sm2_z_digest_update(struct sm3_state *sctx,
-			       MPI m, unsigned int pbytes)
+static int sm2_z_digest_update(struct shash_desc *desc,
+			       MPI m, unsigned int pbytes)
 {
 	static const unsigned char zero[32];
 	unsigned char *in;
 	unsigned int inlen;
+	int err;
 
 	in = mpi_get_buffer(m, &inlen, NULL);
 	if (!in)
@@ -226,21 +233,22 @@ static int sm2_z_digest_update(struct sm3_state *sctx,
 
 	if (inlen < pbytes) {
 		/* padding with zero */
-		sm3_update(sctx, zero, pbytes - inlen);
-		sm3_update(sctx, in, inlen);
+		err = crypto_shash_update(desc, zero, pbytes - inlen) ?:
+		      crypto_shash_update(desc, in, inlen);
 	} else if (inlen > pbytes) {
 		/* skip the starting zero */
-		sm3_update(sctx, in + inlen - pbytes, pbytes);
+		err = crypto_shash_update(desc, in + inlen - pbytes, pbytes);
 	} else {
-		sm3_update(sctx, in, inlen);
+		err = crypto_shash_update(desc, in, inlen);
 	}
 
 	kfree(in);
-	return 0;
+	return err;
 }
 
-static int sm2_z_digest_update_point(struct sm3_state *sctx,
-		MPI_POINT point, struct mpi_ec_ctx *ec, unsigned int pbytes)
+static int sm2_z_digest_update_point(struct shash_desc *desc,
+				     MPI_POINT point, struct mpi_ec_ctx *ec,
+				     unsigned int pbytes)
 {
 	MPI x, y;
 	int ret = -EINVAL;
@@ -248,50 +256,68 @@ static int sm2_z_digest_update_point(struct sm3_state *sctx,
 	x = mpi_new(0);
 	y = mpi_new(0);
 
-	if (!mpi_ec_get_affine(x, y, point, ec) &&
-	    !sm2_z_digest_update(sctx, x, pbytes) &&
-	    !sm2_z_digest_update(sctx, y, pbytes))
-		ret = 0;
+	ret = mpi_ec_get_affine(x, y, point, ec) ? -EINVAL :
+	      sm2_z_digest_update(desc, x, pbytes) ?:
+	      sm2_z_digest_update(desc, y, pbytes);
 
 	mpi_free(x);
 	mpi_free(y);
 	return ret;
 }
 
-int sm2_compute_z_digest(struct crypto_akcipher *tfm,
-			 const unsigned char *id, size_t id_len,
-			 unsigned char dgst[SM3_DIGEST_SIZE])
+int sm2_compute_z_digest(struct shash_desc *desc,
+			 const void *key, unsigned int keylen, void *dgst)
 {
-	struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
-	uint16_t bits_len;
-	unsigned char entl[2];
-	struct sm3_state sctx;
+	struct mpi_ec_ctx *ec;
+	unsigned int bits_len;
 	unsigned int pbytes;
+	u8 entl[2];
+	int err;
 
-	if (id_len > (USHRT_MAX / 8) || !ec->Q)
-		return -EINVAL;
+	ec = kmalloc(sizeof(*ec), GFP_KERNEL);
+	if (!ec)
+		return -ENOMEM;
+
+	err = __sm2_set_pub_key(ec, key, keylen);
+	if (err)
+		goto out_free_ec;
 
-	bits_len = (uint16_t)(id_len * 8);
+	bits_len = SM2_DEFAULT_USERID_LEN * 8;
 	entl[0] = bits_len >> 8;
 	entl[1] = bits_len & 0xff;
 
 	pbytes = MPI_NBYTES(ec->p);
 
 	/* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */
-	sm3_init(&sctx);
-	sm3_update(&sctx, entl, 2);
-	sm3_update(&sctx, id, id_len);
-
-	if (sm2_z_digest_update(&sctx, ec->a, pbytes) ||
-	    sm2_z_digest_update(&sctx, ec->b, pbytes) ||
-	    sm2_z_digest_update_point(&sctx, ec->G, ec, pbytes) ||
-	    sm2_z_digest_update_point(&sctx, ec->Q, ec, pbytes))
-		return -EINVAL;
+	err = crypto_shash_init(desc);
+	if (err)
+		goto out_deinit_ec;
 
-	sm3_final(&sctx, dgst);
-	return 0;
+	err = crypto_shash_update(desc, entl, 2);
+	if (err)
+		goto out_deinit_ec;
+
+	err = crypto_shash_update(desc, SM2_DEFAULT_USERID,
+				  SM2_DEFAULT_USERID_LEN);
+	if (err)
+		goto out_deinit_ec;
+
+	err = sm2_z_digest_update(desc, ec->a, pbytes) ?:
+	      sm2_z_digest_update(desc, ec->b, pbytes) ?:
+	      sm2_z_digest_update_point(desc, ec->G, ec, pbytes) ?:
+	      sm2_z_digest_update_point(desc, ec->Q, ec, pbytes);
+	if (err)
+		goto out_deinit_ec;
+
+	err = crypto_shash_final(desc, dgst);
+
+out_deinit_ec:
+	sm2_ec_ctx_deinit(ec);
+out_free_ec:
+	kfree(ec);
+	return err;
 }
-EXPORT_SYMBOL(sm2_compute_z_digest);
+EXPORT_SYMBOL_GPL(sm2_compute_z_digest);
 
 static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s)
 {
@@ -391,6 +417,14 @@ static int sm2_set_pub_key(struct crypto_akcipher *tfm,
 			   const void *key, unsigned int keylen)
 {
 	struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
+
+	return __sm2_set_pub_key(ec, key, keylen);
+
+}
+
+static int __sm2_set_pub_key(struct mpi_ec_ctx *ec,
+			     const void *key, unsigned int keylen)
+{
 	MPI a;
 	int rc;
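
To round off the sm2.c changes, the sketch below illustrates the reworked sm2_compute_z_digest() calling convention: the caller now owns the SM3 hash and passes the raw public key, while the GM/T 0009-2012 default user id is applied internally. It is illustrative only and is not the actual x509_public_key.c caller; the helper name and key encoding are assumptions.

/* Illustrative sketch, not part of the patch: computing the SM2 "Z" digest. */
static int example_sm2_z_digest(const void *pubkey, unsigned int keylen,
				u8 *out /* SM3_DIGEST_SIZE bytes */)
{
    struct crypto_shash *sm3;
    struct shash_desc *desc;
    int err;

    sm3 = crypto_alloc_shash("sm3", 0, 0);
    if (IS_ERR(sm3))
        return PTR_ERR(sm3);

    desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(sm3), GFP_KERNEL);
    if (!desc) {
        err = -ENOMEM;
        goto out_free_tfm;
    }
    desc->tfm = sm3;

    /* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA), IDA = default id */
    err = sm2_compute_z_digest(desc, pubkey, keylen, out);

    kfree(desc);
out_free_tfm:
    crypto_free_shash(sm3);
    return err;
}
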