Diffstat (limited to 'crypto')
-rw-r--r--  crypto/842.c | 6
-rw-r--r--  crypto/Kconfig | 82
-rw-r--r--  crypto/Makefile | 22
-rw-r--r--  crypto/acompress.c | 410
-rw-r--r--  crypto/adiantum.c | 2
-rw-r--r--  crypto/aead.c | 1
-rw-r--r--  crypto/aegis128-core.c | 2
-rw-r--r--  crypto/aes_generic.c | 2
-rw-r--r--  crypto/ahash.c | 783
-rw-r--r--  crypto/akcipher.c | 1
-rw-r--r--  crypto/algapi.c | 82
-rw-r--r--  crypto/algboss.c | 10
-rw-r--r--  crypto/algif_aead.c | 101
-rw-r--r--  crypto/algif_hash.c | 4
-rw-r--r--  crypto/ansi_cprng.c | 2
-rw-r--r--  crypto/anubis.c | 2
-rw-r--r--  crypto/api.c | 37
-rw-r--r--  crypto/arc4.c | 2
-rw-r--r--  crypto/aria_generic.c | 2
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 36
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 3
-rw-r--r--  crypto/authenc.c | 34
-rw-r--r--  crypto/authencesn.c | 40
-rw-r--r--  crypto/blake2b_generic.c | 33
-rw-r--r--  crypto/blowfish_generic.c | 2
-rw-r--r--  crypto/camellia_generic.c | 2
-rw-r--r--  crypto/cast5_generic.c | 2
-rw-r--r--  crypto/cast6_generic.c | 2
-rw-r--r--  crypto/cbc.c | 2
-rw-r--r--  crypto/ccm.c | 65
-rw-r--r--  crypto/chacha.c | 260
-rw-r--r--  crypto/chacha20poly1305.c | 321
-rw-r--r--  crypto/chacha_generic.c | 139
-rw-r--r--  crypto/cmac.c | 94
-rw-r--r--  crypto/crc32.c (renamed from crypto/crc32_generic.c) | 2
-rw-r--r--  crypto/crc32c.c (renamed from crypto/crc32c_generic.c) | 2
-rw-r--r--  crypto/cryptd.c | 2
-rw-r--r--  crypto/crypto_engine.c | 31
-rw-r--r--  crypto/crypto_null.c | 72
-rw-r--r--  crypto/ctr.c | 2
-rw-r--r--  crypto/cts.c | 2
-rw-r--r--  crypto/curve25519-generic.c | 2
-rw-r--r--  crypto/deflate.c | 355
-rw-r--r--  crypto/des_generic.c | 2
-rw-r--r--  crypto/dh.c | 2
-rw-r--r--  crypto/drbg.c | 2
-rw-r--r--  crypto/ecb.c | 2
-rw-r--r--  crypto/ecdh.c | 2
-rw-r--r--  crypto/ecdsa-p1363.c | 6
-rw-r--r--  crypto/ecdsa-x962.c | 5
-rw-r--r--  crypto/ecdsa.c | 4
-rw-r--r--  crypto/echainiv.c | 20
-rw-r--r--  crypto/ecrdsa.c | 2
-rw-r--r--  crypto/essiv.c | 5
-rw-r--r--  crypto/fcrypt.c | 2
-rw-r--r--  crypto/fips.c | 2
-rw-r--r--  crypto/gcm.c | 43
-rw-r--r--  crypto/geniv.c | 13
-rw-r--r--  crypto/ghash-generic.c | 58
-rw-r--r--  crypto/hctr2.c | 2
-rw-r--r--  crypto/hkdf.c | 2
-rw-r--r--  crypto/hmac.c | 398
-rw-r--r--  crypto/internal.h | 9
-rw-r--r--  crypto/kdf_sp800108.c | 2
-rw-r--r--  crypto/khazad.c | 2
-rw-r--r--  crypto/kpp.c | 1
-rw-r--r--  crypto/krb5enc.c | 2
-rw-r--r--  crypto/lrw.c | 6
-rw-r--r--  crypto/lskcipher.c | 1
-rw-r--r--  crypto/lz4.c | 6
-rw-r--r--  crypto/lz4hc.c | 6
-rw-r--r--  crypto/lzo-rle.c | 6
-rw-r--r--  crypto/lzo.c | 6
-rw-r--r--  crypto/md4.c | 2
-rw-r--r--  crypto/md5.c | 104
-rw-r--r--  crypto/michael_mic.c | 2
-rw-r--r--  crypto/nhpoly1305.c | 2
-rw-r--r--  crypto/pcbc.c | 2
-rw-r--r--  crypto/pcrypt.c | 2
-rw-r--r--  crypto/poly1305_generic.c | 149
-rw-r--r--  crypto/polyval-generic.c | 118
-rw-r--r--  crypto/rmd160.c | 90
-rw-r--r--  crypto/rng.c | 1
-rw-r--r--  crypto/rsa.c | 2
-rw-r--r--  crypto/rsassa-pkcs1.c | 2
-rw-r--r--  crypto/scatterwalk.c | 274
-rw-r--r--  crypto/scompress.c | 252
-rw-r--r--  crypto/seed.c | 2
-rw-r--r--  crypto/seqiv.c | 19
-rw-r--r--  crypto/serpent_generic.c | 2
-rw-r--r--  crypto/sha1_generic.c | 35
-rw-r--r--  crypto/sha256.c | 283
-rw-r--r--  crypto/sha256_generic.c | 110
-rw-r--r--  crypto/sha3_generic.c | 101
-rw-r--r--  crypto/sha512_generic.c | 52
-rw-r--r--  crypto/shash.c | 276
-rw-r--r--  crypto/sig.c | 10
-rw-r--r--  crypto/skcipher.c | 262
-rw-r--r--  crypto/sm3.c | 246
-rw-r--r--  crypto/sm3_generic.c | 33
-rw-r--r--  crypto/sm4_generic.c | 2
-rw-r--r--  crypto/streebog_generic.c | 73
-rw-r--r--  crypto/tcrypt.c | 239
-rw-r--r--  crypto/tcrypt.h | 4
-rw-r--r--  crypto/tea.c | 2
-rw-r--r--  crypto/testmgr.c | 307
-rw-r--r--  crypto/testmgr.h | 288
-rw-r--r--  crypto/twofish_generic.c | 2
-rw-r--r--  crypto/wp512.c | 2
-rw-r--r--  crypto/xcbc.c | 94
-rw-r--r--  crypto/xctr.c | 2
-rw-r--r--  crypto/xts.c | 6
-rw-r--r--  crypto/xxhash_generic.c | 2
-rw-r--r--  crypto/zstd.c | 2
114 files changed, 3158 insertions, 3977 deletions
diff --git a/crypto/842.c b/crypto/842.c
index 5fb37a925989..8c257c40e2b9 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -23,10 +23,6 @@
#include <linux/module.h>
#include <linux/sw842.h>
-struct crypto842_ctx {
- void *wmem; /* working memory for compress */
-};
-
static void *crypto842_alloc_ctx(void)
{
void *ctx;
@@ -74,7 +70,7 @@ static int __init crypto842_mod_init(void)
{
return crypto_register_scomp(&scomp);
}
-subsys_initcall(crypto842_mod_init);
+module_init(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
diff --git a/crypto/Kconfig b/crypto/Kconfig
index dbf97c4e7c59..e9fee7818e27 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -25,7 +25,7 @@ menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
- depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+ depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && CRYPTO_SELFTESTS
depends on (MODULE_SIG || !MODULES)
help
This option enables the fips boot option which is
@@ -143,16 +143,17 @@ config CRYPTO_ACOMP
config CRYPTO_HKDF
tristate
- select CRYPTO_SHA256 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
- select CRYPTO_SHA512 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_SHA256 if CRYPTO_SELFTESTS
+ select CRYPTO_SHA512 if CRYPTO_SELFTESTS
select CRYPTO_HASH2
config CRYPTO_MANAGER
- tristate "Cryptographic algorithm manager"
+ tristate
+ default CRYPTO_ALGAPI if CRYPTO_SELFTESTS
select CRYPTO_MANAGER2
help
- Create default cryptographic template instantiations such as
- cbc(aes).
+ This provides the support for instantiating templates such as
+ cbc(aes), and the support for the crypto self-tests.
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
@@ -173,35 +174,27 @@ config CRYPTO_USER
Userspace configuration for cryptographic instantiations such as
cbc(aes).
-config CRYPTO_MANAGER_DISABLE_TESTS
- bool "Disable run-time self tests"
- default y
+config CRYPTO_SELFTESTS
+ bool "Enable cryptographic self-tests"
+ depends on DEBUG_KERNEL
help
- Disable run-time self tests that normally take place at
- algorithm registration.
+ Enable the cryptographic self-tests.
-config CRYPTO_MANAGER_EXTRA_TESTS
- bool "Enable extra run-time crypto self tests"
- depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER
- help
- Enable extra run-time self tests of registered crypto algorithms,
- including randomized fuzz tests.
+ The cryptographic self-tests run at boot time, or at algorithm
+ registration time if algorithms are dynamically loaded later.
- This is intended for developer use only, as these tests take much
- longer to run than the normal self tests.
+ This is primarily intended for developer use. It should not be
+ enabled in production kernels, unless you are trying to use these
+ tests to fulfill a FIPS testing requirement.
config CRYPTO_NULL
tristate "Null algorithms"
- select CRYPTO_NULL2
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
help
These are 'Null' algorithms, used by IPsec, which do nothing.
-config CRYPTO_NULL2
- tristate
- select CRYPTO_ALGAPI2
- select CRYPTO_SKCIPHER2
- select CRYPTO_HASH2
-
config CRYPTO_PCRYPT
tristate "Parallel crypto engine"
depends on SMP
@@ -228,7 +221,6 @@ config CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
- select CRYPTO_NULL
help
Authenc: Combined mode wrapper for IPsec.
@@ -240,18 +232,21 @@ config CRYPTO_KRB5ENC
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
- select CRYPTO_NULL
help
Combined hash and cipher support for Kerberos 5 RFC3961 simplified
profile. This is required for Kerberos 5-style encryption, used by
sunrpc/NFS and rxrpc/AFS.
-config CRYPTO_TEST
- tristate "Testing module"
+config CRYPTO_BENCHMARK
+ tristate "Crypto benchmarking module"
depends on m || EXPERT
select CRYPTO_MANAGER
help
- Quick & dirty crypto test module.
+ Quick & dirty crypto benchmarking module.
+
+ This is mainly intended for use by people developing cryptographic
+ algorithms in the kernel. It should not be enabled in production
+ kernels.
config CRYPTO_SIMD
tristate
@@ -634,8 +629,8 @@ config CRYPTO_ARC4
config CRYPTO_CHACHA20
tristate "ChaCha"
+ select CRYPTO_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_LIB_CHACHA_INTERNAL
select CRYPTO_SKCIPHER
help
The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
@@ -784,8 +779,8 @@ config CRYPTO_AEGIS128_SIMD
config CRYPTO_CHACHA20POLY1305
tristate "ChaCha20-Poly1305"
select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
select CRYPTO_AEAD
+ select CRYPTO_LIB_POLY1305
select CRYPTO_MANAGER
help
ChaCha20 stream cipher and Poly1305 authenticator combined
@@ -806,7 +801,6 @@ config CRYPTO_GCM
select CRYPTO_CTR
select CRYPTO_AEAD
select CRYPTO_GHASH
- select CRYPTO_NULL
select CRYPTO_MANAGER
help
GCM (Galois/Counter Mode) authenticated encryption mode and GMAC
@@ -817,7 +811,6 @@ config CRYPTO_GCM
config CRYPTO_GENIV
tristate
select CRYPTO_AEAD
- select CRYPTO_NULL
select CRYPTO_MANAGER
select CRYPTO_RNG_DEFAULT
@@ -953,18 +946,6 @@ config CRYPTO_POLYVAL
This is used in HCTR2. It is not a general-purpose
cryptographic hash function.
-config CRYPTO_POLY1305
- tristate "Poly1305"
- select CRYPTO_HASH
- select CRYPTO_LIB_POLY1305_GENERIC
- select CRYPTO_LIB_POLY1305_INTERNAL
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
- It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
- in IETF protocols. This is the portable C implementation of Poly1305.
-
config CRYPTO_RMD160
tristate "RIPEMD-160"
select CRYPTO_HASH
@@ -994,6 +975,7 @@ config CRYPTO_SHA256
tristate "SHA-224 and SHA-256"
select CRYPTO_HASH
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA256_GENERIC
help
SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC 10118-3)
@@ -1012,13 +994,10 @@ config CRYPTO_SHA3
help
SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
-config CRYPTO_SM3
- tristate
-
config CRYPTO_SM3_GENERIC
tristate "SM3 (ShangMi 3)"
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3)
@@ -1406,7 +1385,6 @@ config CRYPTO_USER_API_AEAD
depends on NET
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
- select CRYPTO_NULL
select CRYPTO_USER_API
help
Enable the userspace interface for AEAD cipher algorithms.
diff --git a/crypto/Makefile b/crypto/Makefile
index 0e6ab5ffd3f7..017df3a2e4bb 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -71,15 +71,15 @@ obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
-obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
+obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
-obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
+obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
+CFLAGS_sha256.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
-obj-$(CONFIG_CRYPTO_SM3) += sm3.o
obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o
obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
@@ -148,14 +148,16 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
-obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
-obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
+obj-$(CONFIG_CRYPTO_CHACHA20) += chacha.o
+CFLAGS_chacha.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
-obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
-CFLAGS_crc32c_generic.o += -DARCH=$(ARCH)
-CFLAGS_crc32_generic.o += -DARCH=$(ARCH)
+obj-$(CONFIG_CRYPTO_CRC32C) += crc32c-cryptoapi.o
+crc32c-cryptoapi-y := crc32c.o
+CFLAGS_crc32c.o += -DARCH=$(ARCH)
+obj-$(CONFIG_CRYPTO_CRC32) += crc32-cryptoapi.o
+crc32-cryptoapi-y := crc32.o
+CFLAGS_crc32.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
@@ -172,7 +174,7 @@ KASAN_SANITIZE_jitterentropy.o = n
UBSAN_SANITIZE_jitterentropy.o = n
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o
-obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
index f7a3fbe5447e..be28cbfd22e3 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -8,20 +8,32 @@
*/
#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
-#include <linux/errno.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/page-flags.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
struct crypto_scomp;
+enum {
+ ACOMP_WALK_SLEEP = 1 << 0,
+ ACOMP_WALK_SRC_LINEAR = 1 << 1,
+ ACOMP_WALK_DST_LINEAR = 1 << 2,
+};
+
static const struct crypto_type crypto_acomp_type;
static void acomp_reqchain_done(void *data, int err);
@@ -65,7 +77,7 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
alg->exit(acomp);
if (acomp_is_async(acomp))
- crypto_free_acomp(acomp->fb);
+ crypto_free_acomp(crypto_acomp_fb(acomp));
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
@@ -75,8 +87,6 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *fb = NULL;
int err;
- acomp->fb = acomp;
-
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
@@ -90,12 +100,12 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
goto out_free_fb;
- acomp->fb = fb;
+ tfm->fb = crypto_acomp_tfm(fb);
}
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
- acomp->reqsize = alg->reqsize;
+ acomp->reqsize = alg->base.cra_reqsize;
acomp->base.exit = crypto_acomp_exit_tfm;
@@ -136,6 +146,7 @@ static const struct crypto_type crypto_acomp_type = {
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
+ .algsize = offsetof(struct acomp_alg, base),
};
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
@@ -161,7 +172,6 @@ static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
state->data = req->base.data;
req->base.complete = cplt;
req->base.data = state;
- state->req0 = req;
}
static void acomp_restore_req(struct acomp_req *req)
@@ -172,23 +182,16 @@ static void acomp_restore_req(struct acomp_req *req)
req->base.data = state->data;
}
-static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
+static void acomp_reqchain_virt(struct acomp_req *req)
{
- struct acomp_req *req = state->cur;
+ struct acomp_req_chain *state = &req->chain;
unsigned int slen = req->slen;
unsigned int dlen = req->dlen;
- req->base.err = err;
- state = &req->chain;
-
if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
acomp_request_set_src_dma(req, state->src, slen);
- else if (state->flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)
- acomp_request_set_src_folio(req, state->sfolio, state->soff, slen);
if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
acomp_request_set_dst_dma(req, state->dst, dlen);
- else if (state->flags & CRYPTO_ACOMP_REQ_DST_FOLIO)
- acomp_request_set_dst_folio(req, state->dfolio, state->doff, dlen);
}
static void acomp_virt_to_sg(struct acomp_req *req)
@@ -196,9 +199,7 @@ static void acomp_virt_to_sg(struct acomp_req *req)
struct acomp_req_chain *state = &req->chain;
state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
- CRYPTO_ACOMP_REQ_DST_VIRT |
- CRYPTO_ACOMP_REQ_SRC_FOLIO |
- CRYPTO_ACOMP_REQ_DST_FOLIO);
+ CRYPTO_ACOMP_REQ_DST_VIRT);
if (acomp_request_src_isvirt(req)) {
unsigned int slen = req->slen;
@@ -207,17 +208,6 @@ static void acomp_virt_to_sg(struct acomp_req *req)
state->src = svirt;
sg_init_one(&state->ssg, svirt, slen);
acomp_request_set_src_sg(req, &state->ssg, slen);
- } else if (acomp_request_src_isfolio(req)) {
- struct folio *folio = req->sfolio;
- unsigned int slen = req->slen;
- size_t off = req->soff;
-
- state->sfolio = folio;
- state->soff = off;
- sg_init_table(&state->ssg, 1);
- sg_set_page(&state->ssg, folio_page(folio, off / PAGE_SIZE),
- slen, off % PAGE_SIZE);
- acomp_request_set_src_sg(req, &state->ssg, slen);
}
if (acomp_request_dst_isvirt(req)) {
@@ -227,39 +217,15 @@ static void acomp_virt_to_sg(struct acomp_req *req)
state->dst = dvirt;
sg_init_one(&state->dsg, dvirt, dlen);
acomp_request_set_dst_sg(req, &state->dsg, dlen);
- } else if (acomp_request_dst_isfolio(req)) {
- struct folio *folio = req->dfolio;
- unsigned int dlen = req->dlen;
- size_t off = req->doff;
-
- state->dfolio = folio;
- state->doff = off;
- sg_init_table(&state->dsg, 1);
- sg_set_page(&state->dsg, folio_page(folio, off / PAGE_SIZE),
- dlen, off % PAGE_SIZE);
- acomp_request_set_src_sg(req, &state->dsg, dlen);
}
}
-static int acomp_do_nondma(struct acomp_req_chain *state,
- struct acomp_req *req)
+static int acomp_do_nondma(struct acomp_req *req, bool comp)
{
- u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT |
- CRYPTO_ACOMP_REQ_SRC_NONDMA |
- CRYPTO_ACOMP_REQ_DST_VIRT |
- CRYPTO_ACOMP_REQ_DST_NONDMA;
- ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
int err;
- acomp_request_set_callback(fbreq, req->base.flags, NULL, NULL);
- fbreq->base.flags &= ~keep;
- fbreq->base.flags |= req->base.flags & keep;
- fbreq->src = req->src;
- fbreq->dst = req->dst;
- fbreq->slen = req->slen;
- fbreq->dlen = req->dlen;
-
- if (state->op == crypto_acomp_reqtfm(req)->compress)
+ if (comp)
err = crypto_acomp_compress(fbreq);
else
err = crypto_acomp_decompress(fbreq);
@@ -268,114 +234,74 @@ static int acomp_do_nondma(struct acomp_req_chain *state,
return err;
}
-static int acomp_do_one_req(struct acomp_req_chain *state,
- struct acomp_req *req)
+static int acomp_do_one_req(struct acomp_req *req, bool comp)
{
- state->cur = req;
-
if (acomp_request_isnondma(req))
- return acomp_do_nondma(state, req);
+ return acomp_do_nondma(req, comp);
acomp_virt_to_sg(req);
- return state->op(req);
+ return comp ? crypto_acomp_reqtfm(req)->compress(req) :
+ crypto_acomp_reqtfm(req)->decompress(req);
}
-static int acomp_reqchain_finish(struct acomp_req *req0, int err, u32 mask)
+static int acomp_reqchain_finish(struct acomp_req *req, int err)
{
- struct acomp_req_chain *state = req0->base.data;
- struct acomp_req *req = state->cur;
- struct acomp_req *n;
-
- acomp_reqchain_virt(state, err);
-
- if (req != req0)
- list_add_tail(&req->base.list, &req0->base.list);
-
- list_for_each_entry_safe(req, n, &state->head, base.list) {
- list_del_init(&req->base.list);
-
- req->base.flags &= mask;
- req->base.complete = acomp_reqchain_done;
- req->base.data = state;
-
- err = acomp_do_one_req(state, req);
-
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head))
- err = -EBUSY;
- goto out;
- }
-
- if (err == -EBUSY)
- goto out;
-
- acomp_reqchain_virt(state, err);
- list_add_tail(&req->base.list, &req0->base.list);
- }
-
- acomp_restore_req(req0);
-
-out:
+ acomp_reqchain_virt(req);
+ acomp_restore_req(req);
return err;
}
static void acomp_reqchain_done(void *data, int err)
{
- struct acomp_req_chain *state = data;
- crypto_completion_t compl = state->compl;
+ struct acomp_req *req = data;
+ crypto_completion_t compl;
- data = state->data;
+ compl = req->chain.compl;
+ data = req->chain.data;
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head))
- return;
+ if (err == -EINPROGRESS)
goto notify;
- }
- err = acomp_reqchain_finish(state->req0, err,
- CRYPTO_TFM_REQ_MAY_BACKLOG);
- if (err == -EBUSY)
- return;
+ err = acomp_reqchain_finish(req, err);
notify:
compl(data, err);
}
-static int acomp_do_req_chain(struct acomp_req *req,
- int (*op)(struct acomp_req *req))
+static int acomp_do_req_chain(struct acomp_req *req, bool comp)
{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct acomp_req_chain *state;
int err;
- if (crypto_acomp_req_chain(tfm) ||
- (!acomp_request_chained(req) && acomp_request_issg(req)))
- return op(req);
-
acomp_save_req(req, acomp_reqchain_done);
- state = req->base.data;
-
- state->op = op;
- state->src = NULL;
- INIT_LIST_HEAD(&state->head);
- list_splice_init(&req->base.list, &state->head);
- err = acomp_do_one_req(state, req);
+ err = acomp_do_one_req(req, comp);
if (err == -EBUSY || err == -EINPROGRESS)
- return -EBUSY;
+ return err;
- return acomp_reqchain_finish(req, err, ~0);
+ return acomp_reqchain_finish(req, err);
}
int crypto_acomp_compress(struct acomp_req *req)
{
- return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->compress(req);
+ return acomp_do_req_chain(req, true);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);
int crypto_acomp_decompress(struct acomp_req *req)
{
- return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->decompress(req);
+ return acomp_do_req_chain(req, false);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
@@ -434,5 +360,229 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count)
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
+static void acomp_stream_workfn(struct work_struct *work)
+{
+ struct crypto_acomp_streams *s =
+ container_of(work, struct crypto_acomp_streams, stream_work);
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu;
+
+ for_each_cpu(cpu, &s->stream_want) {
+ struct crypto_acomp_stream *ps;
+ void *ctx;
+
+ ps = per_cpu_ptr(streams, cpu);
+ if (ps->ctx)
+ continue;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx))
+ break;
+
+ spin_lock_bh(&ps->lock);
+ ps->ctx = ctx;
+ spin_unlock_bh(&ps->lock);
+
+ cpumask_clear_cpu(cpu, &s->stream_want);
+ }
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ void (*free_ctx)(void *);
+ int i;
+
+ s->streams = NULL;
+ if (!streams)
+ return;
+
+ cancel_work_sync(&s->stream_work);
+ free_ctx = s->free_ctx;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
+
+ if (!ps->ctx)
+ continue;
+
+ free_ctx(ps->ctx);
+ }
+
+ free_percpu(streams);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
+
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams;
+ struct crypto_acomp_stream *ps;
+ unsigned int i;
+ void *ctx;
+
+ if (s->streams)
+ return 0;
+
+ streams = alloc_percpu(struct crypto_acomp_stream);
+ if (!streams)
+ return -ENOMEM;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx)) {
+ free_percpu(streams);
+ return PTR_ERR(ctx);
+ }
+
+ i = cpumask_first(cpu_possible_mask);
+ ps = per_cpu_ptr(streams, i);
+ ps->ctx = ctx;
+
+ for_each_possible_cpu(i) {
+ ps = per_cpu_ptr(streams, i);
+ spin_lock_init(&ps->lock);
+ }
+
+ s->streams = streams;
+
+ INIT_WORK(&s->stream_work, acomp_stream_workfn);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu = raw_smp_processor_id();
+ struct crypto_acomp_stream *ps;
+
+ ps = per_cpu_ptr(streams, cpu);
+ spin_lock_bh(&ps->lock);
+ if (likely(ps->ctx))
+ return ps;
+ spin_unlock(&ps->lock);
+
+ cpumask_set_cpu(cpu, &s->stream_want);
+ schedule_work(&s->stream_work);
+
+ ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
+ spin_lock(&ps->lock);
+ return ps;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used)
+{
+ walk->slen -= used;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
+ scatterwalk_advance(&walk->in, used);
+ else
+ scatterwalk_done_src(&walk->in, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_src);
+
+void acomp_walk_done_dst(struct acomp_walk *walk, int used)
+{
+ walk->dlen -= used;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR))
+ scatterwalk_advance(&walk->out, used);
+ else
+ scatterwalk_done_dst(&walk->out, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
+
+int acomp_walk_next_src(struct acomp_walk *walk)
+{
+ unsigned int slen = walk->slen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
+ walk->in.offset);
+ return min(slen, max);
+ }
+
+ return slen ? scatterwalk_next(&walk->in, slen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_src);
+
+int acomp_walk_next_dst(struct acomp_walk *walk)
+{
+ unsigned int dlen = walk->dlen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
+ walk->out.offset);
+ return min(dlen, max);
+ }
+
+ return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
+
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic)
+{
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ walk->slen = req->slen;
+ walk->dlen = req->dlen;
+
+ if (!walk->slen || !walk->dlen)
+ return -EINVAL;
+
+ walk->flags = 0;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags |= ACOMP_WALK_SLEEP;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
+ walk->flags |= ACOMP_WALK_SRC_LINEAR;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
+ walk->flags |= ACOMP_WALK_DST_LINEAR;
+
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.sg = (void *)req->svirt;
+ walk->in.offset = 0;
+ } else
+ scatterwalk_start(&walk->in, src);
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.sg = (void *)req->dvirt;
+ walk->out.offset = 0;
+ } else
+ scatterwalk_start(&walk->out, dst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_virt);
+
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp)
+{
+ struct acomp_req *nreq;
+
+ nreq = container_of(crypto_request_clone(&req->base, total, gfp),
+ struct acomp_req, base);
+ if (nreq == req)
+ return req;
+
+ if (req->src == &req->chain.ssg)
+ nreq->src = &nreq->chain.ssg;
+ if (req->dst == &req->chain.dsg)
+ nreq->dst = &nreq->chain.dsg;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(acomp_request_clone);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index c3ef583598b4..a6bca877c3c7 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -639,7 +639,7 @@ static void __exit adiantum_module_exit(void)
crypto_unregister_template(&adiantum_tmpl);
}
-subsys_initcall(adiantum_module_init);
+module_init(adiantum_module_init);
module_exit(adiantum_module_exit);
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
diff --git a/crypto/aead.c b/crypto/aead.c
index 12f5b42171af..5d14b775036e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -186,6 +186,7 @@ static const struct crypto_type crypto_aead_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
.tfmsize = offsetof(struct crypto_aead, base),
+ .algsize = offsetof(struct aead_alg, base),
};
int crypto_grab_aead(struct crypto_aead_spawn *spawn,
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 72f6ee1345ef..ca80d861345d 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -566,7 +566,7 @@ static void __exit crypto_aegis128_module_exit(void)
crypto_unregister_aead(&crypto_aegis128_alg_generic);
}
-subsys_initcall(crypto_aegis128_module_init);
+module_init(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 3c66d425c97b..85d2e78c8ef2 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1311,7 +1311,7 @@ static void __exit aes_fini(void)
crypto_unregister_alg(&aes_alg);
}
-subsys_initcall(aes_init);
+module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 2d9eec2b2b1c..e10bc2659ae4 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
@@ -42,26 +42,46 @@ struct crypto_hash_walk {
struct scatterlist *sg;
};
-struct ahash_save_req_state {
- struct list_head head;
- struct ahash_request *req0;
- struct ahash_request *cur;
- int (*op)(struct ahash_request *req);
+static int ahash_def_finup(struct ahash_request *req);
+
+static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_ALG_NEED_FALLBACK;
+}
+
+static inline void ahash_op_done(void *data, int err,
+ int (*finish)(struct ahash_request *, int))
+{
+ struct ahash_request *areq = data;
crypto_completion_t compl;
- void *data;
- struct scatterlist sg;
- const u8 *src;
- u8 *page;
- unsigned int offset;
- unsigned int nbytes;
-};
-static void ahash_reqchain_done(void *data, int err);
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
-static void ahash_restore_req(struct ahash_request *req);
-static void ahash_def_finup_done1(void *data, int err);
-static int ahash_def_finup_finish1(struct ahash_request *req, int err);
-static int ahash_def_finup(struct ahash_request *req);
+ compl = areq->saved_complete;
+ data = areq->saved_data;
+ if (err == -EINPROGRESS)
+ goto out;
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = finish(areq, err);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ compl(data, err);
+}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
@@ -266,7 +286,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
CRYPTO_TFM_NEED_KEY);
- crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
return 0;
}
@@ -303,6 +322,9 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
int err;
err = alg->setkey(tfm, key, keylen);
+ if (!err && crypto_ahash_need_fallback(tfm))
+ err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
+ key, keylen);
if (unlikely(err)) {
ahash_set_needkey(tfm, alg);
return err;
@@ -313,421 +335,261 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
-static bool ahash_request_hasvirt(struct ahash_request *req)
-{
- return ahash_request_isvirt(req);
-}
-
-static int ahash_reqchain_virt(struct ahash_save_req_state *state,
- int err, u32 mask)
-{
- struct ahash_request *req = state->cur;
-
- for (;;) {
- unsigned len = state->nbytes;
-
- req->base.err = err;
-
- if (!state->offset)
- break;
-
- if (state->offset == len || err) {
- u8 *result = req->result;
-
- ahash_request_set_virt(req, state->src, result, len);
- state->offset = 0;
- break;
- }
-
- len -= state->offset;
-
- len = min(PAGE_SIZE, len);
- memcpy(state->page, state->src + state->offset, len);
- state->offset += len;
- req->nbytes = len;
-
- err = state->op(req);
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head) ||
- state->offset < state->nbytes)
- err = -EBUSY;
- break;
- }
-
- if (err == -EBUSY)
- break;
- }
-
- return err;
-}
-
-static int ahash_reqchain_finish(struct ahash_request *req0,
- struct ahash_save_req_state *state,
- int err, u32 mask)
-{
- struct ahash_request *req = state->cur;
- struct crypto_ahash *tfm;
- struct ahash_request *n;
- bool update;
- u8 *page;
-
- err = ahash_reqchain_virt(state, err, mask);
- if (err == -EINPROGRESS || err == -EBUSY)
- goto out;
-
- if (req != req0)
- list_add_tail(&req->base.list, &req0->base.list);
-
- tfm = crypto_ahash_reqtfm(req);
- update = state->op == crypto_ahash_alg(tfm)->update;
-
- list_for_each_entry_safe(req, n, &state->head, base.list) {
- list_del_init(&req->base.list);
-
- req->base.flags &= mask;
- req->base.complete = ahash_reqchain_done;
- req->base.data = state;
- state->cur = req;
-
- if (update && ahash_request_isvirt(req) && req->nbytes) {
- unsigned len = req->nbytes;
- u8 *result = req->result;
-
- state->src = req->svirt;
- state->nbytes = len;
-
- len = min(PAGE_SIZE, len);
-
- memcpy(state->page, req->svirt, len);
- state->offset = len;
-
- ahash_request_set_crypt(req, &state->sg, result, len);
- }
-
- err = state->op(req);
-
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head) ||
- state->offset < state->nbytes)
- err = -EBUSY;
- goto out;
- }
-
- if (err == -EBUSY)
- goto out;
-
- err = ahash_reqchain_virt(state, err, mask);
- if (err == -EINPROGRESS || err == -EBUSY)
- goto out;
-
- list_add_tail(&req->base.list, &req0->base.list);
- }
-
- page = state->page;
- if (page) {
- memset(page, 0, PAGE_SIZE);
- free_page((unsigned long)page);
- }
- ahash_restore_req(req0);
-
-out:
- return err;
-}
-
-static void ahash_reqchain_done(void *data, int err)
-{
- struct ahash_save_req_state *state = data;
- crypto_completion_t compl = state->compl;
-
- data = state->data;
-
- if (err == -EINPROGRESS) {
- if (!list_empty(&state->head) || state->offset < state->nbytes)
- return;
- goto notify;
- }
-
- err = ahash_reqchain_finish(state->req0, state, err,
- CRYPTO_TFM_REQ_MAY_BACKLOG);
- if (err == -EBUSY)
- return;
-
-notify:
- compl(data, err);
-}
-
static int ahash_do_req_chain(struct ahash_request *req,
- int (*op)(struct ahash_request *req))
+ int (*const *op)(struct ahash_request *req))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- bool update = op == crypto_ahash_alg(tfm)->update;
- struct ahash_save_req_state *state;
- struct ahash_save_req_state state0;
- u8 *page = NULL;
int err;
- if (crypto_ahash_req_chain(tfm) ||
- (!ahash_request_chained(req) &&
- (!update || !ahash_request_isvirt(req))))
- return op(req);
-
- if (update && ahash_request_hasvirt(req)) {
- gfp_t gfp;
- u32 flags;
-
- flags = ahash_request_flags(req);
- gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- page = (void *)__get_free_page(gfp);
- err = -ENOMEM;
- if (!page)
- goto out_set_chain;
- }
-
- state = &state0;
- if (ahash_is_async(tfm)) {
- err = ahash_save_req(req, ahash_reqchain_done);
- if (err)
- goto out_free_page;
+ if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req))
+ return (*op)(req);
- state = req->base.data;
- }
+ if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
+ return -ENOSYS;
- state->op = op;
- state->cur = req;
- state->page = page;
- state->offset = 0;
- state->nbytes = 0;
- INIT_LIST_HEAD(&state->head);
+ {
+ u8 state[HASH_MAX_STATESIZE];
- if (page)
- sg_init_one(&state->sg, page, PAGE_SIZE);
+ if (op == &crypto_ahash_alg(tfm)->digest) {
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+ err = crypto_ahash_digest(req);
+ goto out_no_state;
+ }
- if (update && ahash_request_isvirt(req) && req->nbytes) {
- unsigned len = req->nbytes;
- u8 *result = req->result;
+ err = crypto_ahash_export(req, state);
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+ err = err ?: crypto_ahash_import(req, state);
- state->src = req->svirt;
- state->nbytes = len;
+ if (op == &crypto_ahash_alg(tfm)->finup) {
+ err = err ?: crypto_ahash_finup(req);
+ goto out_no_state;
+ }
- len = min(PAGE_SIZE, len);
+ err = err ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, state);
- memcpy(page, req->svirt, len);
- state->offset = len;
+ ahash_request_set_tfm(req, tfm);
+ return err ?: crypto_ahash_import(req, state);
- ahash_request_set_crypt(req, &state->sg, result, len);
+out_no_state:
+ ahash_request_set_tfm(req, tfm);
+ return err;
}
-
- err = op(req);
- if (err == -EBUSY || err == -EINPROGRESS)
- return -EBUSY;
-
- return ahash_reqchain_finish(req, state, err, ~0);
-
-out_free_page:
- free_page((unsigned long)page);
-
-out_set_chain:
- req->base.err = err;
- return err;
}
int crypto_ahash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash)) {
- int err;
-
- err = crypto_shash_init(prepare_shash_desc(req, tfm));
- req->base.err = err;
- return err;
- }
-
+ if (likely(tfm->using_shash))
+ return crypto_shash_init(prepare_shash_desc(req, tfm));
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_ahash_block_only(tfm)) {
+ u8 *buf = ahash_request_ctx(req);
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
+ buf += crypto_ahash_reqsize(tfm) - 1;
+ *buf = 0;
+ }
+ return crypto_ahash_alg(tfm)->init(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_save_req_state *state;
- gfp_t gfp;
- u32 flags;
-
- if (!ahash_is_async(tfm))
- return 0;
-
- flags = ahash_request_flags(req);
- gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
- state = kmalloc(sizeof(*state), gfp);
- if (!state)
- return -ENOMEM;
-
- state->compl = req->base.complete;
- state->data = req->base.data;
+ req->saved_complete = req->base.complete;
+ req->saved_data = req->base.data;
req->base.complete = cplt;
- req->base.data = state;
- state->req0 = req;
-
- return 0;
+ req->base.data = req;
}
static void ahash_restore_req(struct ahash_request *req)
{
- struct ahash_save_req_state *state;
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(req);
- if (!ahash_is_async(tfm))
- return;
-
- state = req->base.data;
-
- req->base.complete = state->compl;
- req->base.data = state->data;
- kfree(state);
+ req->base.complete = req->saved_complete;
+ req->base.data = req->saved_data;
}
-int crypto_ahash_update(struct ahash_request *req)
+static int ahash_update_finish(struct ahash_request *req, int err)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (likely(tfm->using_shash)) {
- int err;
-
- err = shash_ahash_update(req, ahash_request_ctx(req));
- req->base.err = err;
- return err;
+ bool nonzero = crypto_ahash_final_nonzero(tfm);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen;
+ u8 *buf;
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen) {
+ req->src = req->sg_head + 1;
+ if (sg_is_chain(req->src))
+ req->src = sg_chain_ptr(req->src);
}
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
-}
-EXPORT_SYMBOL_GPL(crypto_ahash_update);
+ req->nbytes += nonzero - blen;
-int crypto_ahash_final(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ blen = err < 0 ? 0 : err + nonzero;
+ if (ahash_request_isvirt(req))
+ memcpy(buf, req->svirt + req->nbytes - blen, blen);
+ else
+ memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen);
+ *blenp = blen;
- if (likely(tfm->using_shash)) {
- int err;
+ ahash_restore_req(req);
- err = crypto_shash_final(ahash_request_ctx(req), req->result);
- req->base.err = err;
- return err;
- }
+ return err;
+}
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
+static void ahash_update_done(void *data, int err)
+{
+ ahash_op_done(data, err, ahash_update_finish);
}
-EXPORT_SYMBOL_GPL(crypto_ahash_final);
-int crypto_ahash_finup(struct ahash_request *req)
+int crypto_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool nonzero = crypto_ahash_final_nonzero(tfm);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen, err;
+ u8 *buf;
- if (likely(tfm->using_shash)) {
- int err;
-
- err = shash_ahash_finup(req, ahash_request_ctx(req));
- req->base.err = err;
- return err;
- }
+ if (likely(tfm->using_shash))
+ return shash_ahash_update(req, ahash_request_ctx(req));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (!crypto_ahash_block_only(tfm))
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
- if (!crypto_ahash_alg(tfm)->finup ||
- (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
- return ahash_def_finup(req);
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
-}
-EXPORT_SYMBOL_GPL(crypto_ahash_finup);
+ if (blen + req->nbytes < bs + nonzero) {
+ if (ahash_request_isvirt(req))
+ memcpy(buf + blen, req->svirt, req->nbytes);
+ else
+ memcpy_from_sglist(buf + blen, req->src, 0,
+ req->nbytes);
-static int ahash_def_digest_finish(struct ahash_request *req, int err)
-{
- struct crypto_ahash *tfm;
+ *blenp += req->nbytes;
+ return 0;
+ }
- if (err)
- goto out;
+ if (blen) {
+ memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+ sg_set_buf(req->sg_head, buf, blen);
+ if (req->src != req->sg_head + 1)
+ sg_chain(req->sg_head, 2, req->src);
+ req->src = req->sg_head;
+ req->nbytes += blen;
+ }
+ req->nbytes -= nonzero;
- tfm = crypto_ahash_reqtfm(req);
- if (ahash_is_async(tfm))
- req->base.complete = ahash_def_finup_done1;
+ ahash_save_req(req, ahash_update_done);
- err = crypto_ahash_update(req);
+ err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
- return ahash_def_finup_finish1(req, err);
-
-out:
- ahash_restore_req(req);
- return err;
+ return ahash_update_finish(req, err);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_update);
-static void ahash_def_digest_done(void *data, int err)
+static int ahash_finup_finish(struct ahash_request *req, int err)
{
- struct ahash_save_req_state *state0 = data;
- struct ahash_save_req_state state;
- struct ahash_request *areq;
-
- state = *state0;
- areq = state.req0;
- if (err == -EINPROGRESS)
- goto out;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen;
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+
+ if (blen) {
+ if (sg_is_last(req->src))
+ req->src = NULL;
+ else {
+ req->src = req->sg_head + 1;
+ if (sg_is_chain(req->src))
+ req->src = sg_chain_ptr(req->src);
+ }
+ req->nbytes -= blen;
+ }
- areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_restore_req(req);
- err = ahash_def_digest_finish(areq, err);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
+ return err;
+}
-out:
- state.compl(state.data, err);
+static void ahash_finup_done(void *data, int err)
+{
+ ahash_op_done(data, err, ahash_finup_finish);
}
-static int ahash_def_digest(struct ahash_request *req)
+int crypto_ahash_finup(struct ahash_request *req)
{
- int err;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen, err;
+ u8 *buf;
- err = ahash_save_req(req, ahash_def_digest_done);
- if (err)
- return err;
+ if (likely(tfm->using_shash))
+ return shash_ahash_finup(req, ahash_request_ctx(req));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (!crypto_ahash_alg(tfm)->finup)
+ return ahash_def_finup(req);
+ if (!crypto_ahash_block_only(tfm))
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen) {
+ memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+ sg_set_buf(req->sg_head, buf, blen);
+ if (!req->src)
+ sg_mark_end(req->sg_head);
+ else if (req->src != req->sg_head + 1)
+ sg_chain(req->sg_head, 2, req->src);
+ req->src = req->sg_head;
+ req->nbytes += blen;
+ }
- err = crypto_ahash_init(req);
+ ahash_save_req(req, ahash_finup_done);
+
+ err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
- return ahash_def_digest_finish(req, err);
+ return ahash_finup_finish(req, err);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash)) {
- int err;
-
- err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
- req->base.err = err;
- return err;
- }
-
- if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
- return ahash_def_digest(req);
-
+ if (likely(tfm->using_shash))
+ return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
-
- return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
static void ahash_def_finup_done2(void *data, int err)
{
- struct ahash_save_req_state *state = data;
- struct ahash_request *areq = state->req0;
+ struct ahash_request *areq = data;
if (err == -EINPROGRESS)
return;
@@ -738,14 +600,10 @@ static void ahash_def_finup_done2(void *data, int err)
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
- struct crypto_ahash *tfm;
-
if (err)
goto out;
- tfm = crypto_ahash_reqtfm(req);
- if (ahash_is_async(tfm))
- req->base.complete = ahash_def_finup_done2;
+ req->base.complete = ahash_def_finup_done2;
err = crypto_ahash_final(req);
if (err == -EINPROGRESS || err == -EBUSY)
@@ -758,32 +616,14 @@ out:
static void ahash_def_finup_done1(void *data, int err)
{
- struct ahash_save_req_state *state0 = data;
- struct ahash_save_req_state state;
- struct ahash_request *areq;
-
- state = *state0;
- areq = state.req0;
- if (err == -EINPROGRESS)
- goto out;
-
- areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = ahash_def_finup_finish1(areq, err);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
-
-out:
- state.compl(state.data, err);
+ ahash_op_done(data, err, ahash_def_finup_finish1);
}
static int ahash_def_finup(struct ahash_request *req)
{
int err;
- err = ahash_save_req(req, ahash_def_finup_done1);
- if (err)
- return err;
+ ahash_save_req(req, ahash_def_finup_done1);
err = crypto_ahash_update(req);
if (err == -EINPROGRESS || err == -EBUSY)
@@ -792,16 +632,47 @@ static int ahash_def_finup(struct ahash_request *req)
return ahash_def_finup_finish1(req, err);
}
+int crypto_ahash_export_core(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_export_core(ahash_request_ctx(req), out);
+ return crypto_ahash_alg(tfm)->export_core(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export_core);
+
int crypto_ahash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (likely(tfm->using_shash))
return crypto_shash_export(ahash_request_ctx(req), out);
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ unsigned int ss = crypto_ahash_statesize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ memcpy(out + ss - plen, buf + reqsize - plen, plen);
+ }
return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);
+int crypto_ahash_import_core(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_import_core(prepare_shash_desc(req, tfm),
+ in);
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return crypto_ahash_alg(tfm)->import_core(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
+
int crypto_ahash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -810,6 +681,12 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
return crypto_shash_import(prepare_shash_desc(req, tfm), in);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ buf[reqsize - 1] = 0;
+ }
return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);
@@ -819,26 +696,73 @@ static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
- alg->exit_tfm(hash);
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ else if (tfm->__crt_alg->cra_exit)
+ tfm->__crt_alg->cra_exit(tfm);
+
+ if (crypto_ahash_need_fallback(hash))
+ crypto_free_ahash(crypto_ahash_fb(hash));
}
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
+ struct crypto_ahash *fb = NULL;
+ int err;
crypto_ahash_set_statesize(hash, alg->halg.statesize);
- crypto_ahash_set_reqsize(hash, alg->reqsize);
+ crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));
if (tfm->__crt_alg->cra_type == &crypto_shash_type)
return crypto_init_ahash_using_shash(tfm);
+ if (crypto_ahash_need_fallback(hash)) {
+ fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash),
+ CRYPTO_ALG_REQ_VIRT,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_REQ_VIRT |
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ tfm->fb = crypto_ahash_tfm(fb);
+ }
+
ahash_set_needkey(hash, alg);
- if (alg->exit_tfm)
- tfm->exit = crypto_ahash_exit_tfm;
+ tfm->exit = crypto_ahash_exit_tfm;
+
+ if (alg->init_tfm)
+ err = alg->init_tfm(hash);
+ else if (tfm->__crt_alg->cra_init)
+ err = tfm->__crt_alg->cra_init(tfm);
+ else
+ return 0;
+
+ if (err)
+ goto out_free_sync_hash;
+
+ if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) >
+ MAX_SYNC_HASH_REQSIZE)
+ goto out_exit_tfm;
- return alg->init_tfm ? alg->init_tfm(hash) : 0;
+ BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE);
+ if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE)
+ crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE);
+
+ return 0;
+
+out_exit_tfm:
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ else if (tfm->__crt_alg->cra_exit)
+ tfm->__crt_alg->cra_exit(tfm);
+ err = -EINVAL;
+out_free_sync_hash:
+ crypto_free_ahash(fb);
+ return err;
}
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
@@ -897,6 +821,7 @@ static const struct crypto_type crypto_ahash_type = {
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
.tfmsize = offsetof(struct crypto_ahash, base),
+ .algsize = offsetof(struct ahash_alg, halg.base),
};
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
@@ -921,7 +846,7 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
-static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
struct crypto_alg *alg = &halg->base;
@@ -930,11 +855,13 @@ static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
struct hash_alg_common *halg = crypto_hash_alg_common(hash);
struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_ahash *fb = NULL;
struct crypto_ahash *nhash;
struct ahash_alg *alg;
int err;
@@ -964,28 +891,52 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
err = PTR_ERR(shash);
goto out_free_nhash;
}
+ crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash;
nhash->using_shash = true;
*nctx = shash;
return nhash;
}
+ if (crypto_ahash_need_fallback(hash)) {
+ fb = crypto_clone_ahash(crypto_ahash_fb(hash));
+ err = PTR_ERR(fb);
+ if (IS_ERR(fb))
+ goto out_free_nhash;
+
+ crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb);
+ }
+
err = -ENOSYS;
alg = crypto_ahash_alg(hash);
if (!alg->clone_tfm)
- goto out_free_nhash;
+ goto out_free_fb;
err = alg->clone_tfm(nhash, hash);
if (err)
- goto out_free_nhash;
+ goto out_free_fb;
+
+ crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm;
return nhash;
+out_free_fb:
+ crypto_free_ahash(fb);
out_free_nhash:
crypto_free_ahash(nhash);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);
+static int ahash_default_export_core(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
+static int ahash_default_import_core(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
@@ -994,7 +945,11 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
if (alg->halg.statesize == 0)
return -EINVAL;
- if (alg->reqsize && alg->reqsize < alg->halg.statesize)
+ if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize)
+ return -EINVAL;
+
+ if (!(base->cra_flags & CRYPTO_ALG_ASYNC) &&
+ base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
return -EINVAL;
err = hash_prepare_alg(&alg->halg);
@@ -1004,9 +959,28 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
base->cra_type = &crypto_ahash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
+ if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT))
+ base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+
if (!alg->setkey)
alg->setkey = ahash_nosetkey;
+ if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+ BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+ if (!alg->finup)
+ return -EINVAL;
+
+ base->cra_reqsize += base->cra_blocksize + 1;
+ alg->halg.statesize += base->cra_blocksize + 1;
+ alg->export_core = alg->export;
+ alg->import_core = alg->import;
+ } else if (!alg->export_core || !alg->import_core) {
+ alg->export_core = ahash_default_export_core;
+ alg->import_core = ahash_default_import_core;
+ base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ }
+
return 0;
}
@@ -1074,5 +1048,42 @@ int ahash_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
+void ahash_request_free(struct ahash_request *req)
+{
+ if (unlikely(!req))
+ return;
+
+ if (!ahash_req_on_stack(req)) {
+ kfree(req);
+ return;
+ }
+
+ ahash_request_zero(req);
+}
+EXPORT_SYMBOL_GPL(ahash_request_free);
+
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm));
+ int err;
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, data, out, len);
+ err = crypto_ahash_digest(req);
+
+ ahash_request_zero(req);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_digest);
+
+void ahash_free_singlespawn_instance(struct ahash_instance *inst)
+{
+ crypto_drop_spawn(ahash_instance_ctx(inst));
+ kfree(inst);
+}
+EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 72c82d9aa077..a36f50c83827 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -97,6 +97,7 @@ static const struct crypto_type crypto_akcipher_type = {
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
.tfmsize = offsetof(struct crypto_akcipher, base),
+ .algsize = offsetof(struct akcipher_alg, base),
};
int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
diff --git a/crypto/algapi.c b/crypto/algapi.c
index ea9ed9580aa8..e604d0d8b7b4 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -71,12 +71,23 @@ static void crypto_free_instance(struct crypto_instance *inst)
static void crypto_destroy_instance_workfn(struct work_struct *w)
{
- struct crypto_instance *inst = container_of(w, struct crypto_instance,
+ struct crypto_template *tmpl = container_of(w, struct crypto_template,
free_work);
- struct crypto_template *tmpl = inst->tmpl;
+ struct crypto_instance *inst;
+ struct hlist_node *n;
+ HLIST_HEAD(list);
+
+ down_write(&crypto_alg_sem);
+ hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) {
+ if (refcount_read(&inst->alg.cra_refcnt) != -1)
+ continue;
+ hlist_del(&inst->list);
+ hlist_add_head(&inst->list, &list);
+ }
+ up_write(&crypto_alg_sem);
- crypto_free_instance(inst);
- crypto_tmpl_put(tmpl);
+ hlist_for_each_entry_safe(inst, n, &list, list)
+ crypto_free_instance(inst);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
@@ -84,9 +95,10 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
struct crypto_instance *inst = container_of(alg,
struct crypto_instance,
alg);
+ struct crypto_template *tmpl = inst->tmpl;
- INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
- schedule_work(&inst->free_work);
+ refcount_set(&alg->cra_refcnt, -1);
+ schedule_work(&tmpl->free_work);
}
/*
@@ -132,14 +144,16 @@ static void crypto_remove_instance(struct crypto_instance *inst,
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
- if (!tmpl || !crypto_tmpl_get(tmpl))
+ if (!tmpl)
return;
- list_move(&inst->alg.cra_list, list);
+ list_del_init(&inst->alg.cra_list);
hlist_del(&inst->list);
- inst->alg.cra_destroy = crypto_destroy_instance;
+ hlist_add_head(&inst->list, &tmpl->dead);
BUG_ON(!list_empty(&inst->alg.cra_users));
+
+ crypto_alg_put(&inst->alg);
}
/*
@@ -260,8 +274,7 @@ static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg)
{
struct crypto_larval *larval;
- if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER) ||
- IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) ||
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) ||
(alg->cra_flags & CRYPTO_ALG_INTERNAL))
return NULL; /* No self-test needed */
@@ -404,6 +417,15 @@ void crypto_remove_final(struct list_head *list)
}
EXPORT_SYMBOL_GPL(crypto_remove_final);
+static void crypto_free_alg(struct crypto_alg *alg)
+{
+ unsigned int algsize = alg->cra_type->algsize;
+ u8 *p = (u8 *)alg - algsize;
+
+ crypto_destroy_alg(alg);
+ kfree(p);
+}
+
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
@@ -416,6 +438,19 @@ int crypto_register_alg(struct crypto_alg *alg)
if (err)
return err;
+ if (alg->cra_flags & CRYPTO_ALG_DUP_FIRST &&
+ !WARN_ON_ONCE(alg->cra_destroy)) {
+ unsigned int algsize = alg->cra_type->algsize;
+ u8 *p = (u8 *)alg - algsize;
+
+ p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ alg = (void *)(p + algsize);
+ alg->cra_destroy = crypto_free_alg;
+ }
+
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
@@ -424,8 +459,10 @@ int crypto_register_alg(struct crypto_alg *alg)
}
up_write(&crypto_alg_sem);
- if (IS_ERR(larval))
+ if (IS_ERR(larval)) {
+ crypto_alg_put(alg);
return PTR_ERR(larval);
+ }
if (test_started)
crypto_schedule_test(larval);
@@ -461,11 +498,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
- if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
- return;
-
- crypto_alg_put(alg);
+ WARN_ON(!alg->cra_destroy && refcount_read(&alg->cra_refcnt) != 1);
+ list_add(&alg->cra_list, &list);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
@@ -504,6 +539,8 @@ int crypto_register_template(struct crypto_template *tmpl)
struct crypto_template *q;
int err = -EEXIST;
+ INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn);
+
down_write(&crypto_alg_sem);
crypto_check_module_sig(tmpl->module);
@@ -565,6 +602,8 @@ void crypto_unregister_template(struct crypto_template *tmpl)
crypto_free_instance(inst);
}
crypto_remove_final(&users);
+
+ flush_work(&tmpl->free_work);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
@@ -618,6 +657,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
+ inst->alg.cra_destroy = crypto_destroy_instance;
down_write(&crypto_alg_sem);
@@ -883,20 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg)
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg)
{
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_inst_setname);
+EXPORT_SYMBOL_GPL(__crypto_inst_setname);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
{
@@ -1018,7 +1058,7 @@ static void __init crypto_start_tests(void)
if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI))
return;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return;
set_crypto_boot_test_finished();
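The CRYPTO_ALG_DUP_FIRST path above relies on cra_type->algsize being the offset of the embedded struct crypto_alg inside the frontend algorithm type, with that base member placed last; a sketch of the pointer arithmetic under that layout assumption:

	/* algsize == offsetof(struct shash_alg, base), for example */
	u8 *p = (u8 *)alg - alg->cra_type->algsize;	/* start of frontend struct */

	/* duplicate the frontend fields plus the embedded struct crypto_alg */
	p = kmemdup(p, alg->cra_type->algsize + sizeof(*alg), GFP_KERNEL);

	/* the registered algorithm is the copy; it is freed via crypto_free_alg() */
	alg = (struct crypto_alg *)(p + alg->cra_type->algsize);
	alg->cra_destroy = crypto_free_alg;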
diff --git a/crypto/algboss.c b/crypto/algboss.c
index a20926bfd34e..846f586889ee 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -189,7 +189,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
struct task_struct *thread;
struct crypto_test_param *param;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return NOTIFY_DONE;
if (!try_module_get(THIS_MODULE))
@@ -247,13 +247,7 @@ static void __exit cryptomgr_exit(void)
BUG_ON(err);
}
-/*
- * This is arch_initcall() so that the crypto self-tests are run on algorithms
- * registered early by subsys_initcall(). subsys_initcall() is needed for
- * generic implementations so that they're available for comparison tests when
- * other implementations are registered later by module_init().
- */
-arch_initcall(cryptomgr_init);
+module_init(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 7d58cbbce4af..79b016a899a1 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -27,7 +27,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
-#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
@@ -36,19 +35,13 @@
#include <linux/net.h>
#include <net/sock.h>
-struct aead_tfm {
- struct crypto_aead *aead;
- struct crypto_sync_skcipher *null_tfm;
-};
-
static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int as = crypto_aead_authsize(tfm);
/*
@@ -64,27 +57,12 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int ivsize = crypto_aead_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
}
-static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
- struct scatterlist *src,
- struct scatterlist *dst, unsigned int len)
-{
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
-
- skcipher_request_set_sync_tfm(skreq, null_tfm);
- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
- skcipher_request_set_crypt(skreq, src, dst, len, NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -93,9 +71,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
- struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
+ struct crypto_aead *tfm = pask->private;
unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
@@ -223,11 +199,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* v v
* RX SGL: AAD || PT || Tag
*/
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl,
- processed);
- if (err)
- goto free;
+ memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
+ processed);
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
/*
@@ -241,12 +214,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* RX SGL: AAD || CT ----+
*/
- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl,
- outlen);
- if (err)
- goto free;
+ /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+ memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);
/* Create TX SGL for tag and chain it to RX SGL. */
areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
@@ -379,7 +348,7 @@ static int aead_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct aead_tfm *tfm;
+ struct crypto_aead *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -393,7 +362,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
atomic_dec(&pask->nokey_refcnt);
@@ -454,54 +423,22 @@ static struct proto_ops algif_aead_ops_nokey = {
static void *aead_bind(const char *name, u32 type, u32 mask)
{
- struct aead_tfm *tfm;
- struct crypto_aead *aead;
- struct crypto_sync_skcipher *null_tfm;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- aead = crypto_alloc_aead(name, type, mask);
- if (IS_ERR(aead)) {
- kfree(tfm);
- return ERR_CAST(aead);
- }
-
- null_tfm = crypto_get_default_null_skcipher();
- if (IS_ERR(null_tfm)) {
- crypto_free_aead(aead);
- kfree(tfm);
- return ERR_CAST(null_tfm);
- }
-
- tfm->aead = aead;
- tfm->null_tfm = null_tfm;
-
- return tfm;
+ return crypto_alloc_aead(name, type, mask);
}
static void aead_release(void *private)
{
- struct aead_tfm *tfm = private;
-
- crypto_free_aead(tfm->aead);
- crypto_put_default_null_skcipher();
- kfree(tfm);
+ crypto_free_aead(private);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
- struct aead_tfm *tfm = private;
-
- return crypto_aead_setauthsize(tfm->aead, authsize);
+ return crypto_aead_setauthsize(private, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct aead_tfm *tfm = private;
-
- return crypto_aead_setkey(tfm->aead, key, keylen);
+ return crypto_aead_setkey(private, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -510,8 +447,7 @@ static void aead_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int ivlen = crypto_aead_ivsize(tfm);
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
@@ -524,10 +460,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- struct aead_tfm *tfm = private;
- struct crypto_aead *aead = tfm->aead;
+ struct crypto_aead *tfm = private;
unsigned int len = sizeof(*ctx);
- unsigned int ivlen = crypto_aead_ivsize(aead);
+ unsigned int ivlen = crypto_aead_ivsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -554,9 +489,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
static int aead_accept_parent(void *private, struct sock *sk)
{
- struct aead_tfm *tfm = private;
+ struct crypto_aead *tfm = private;
- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5498a87249d3..e3f1a4852737 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock,
goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
- if (err) {
- sock_orphan(sk2);
- sock_put(sk2);
- }
out_free_state:
kfree_sensitive(state);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 64f57c4c4b06..153523ce6076 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -467,7 +467,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-subsys_initcall(prng_mod_init);
+module_init(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 886e7c913688..4268c3833baa 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -694,7 +694,7 @@ static void __exit anubis_mod_fini(void)
crypto_unregister_alg(&anubis_alg);
}
-subsys_initcall(anubis_mod_init);
+module_init(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/api.c b/crypto/api.c
index 3416e98128a0..5724d62e9d07 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -31,8 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
-#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && \
- !IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif
@@ -220,10 +219,19 @@ again:
if (crypto_is_test_larval(larval))
crypto_larval_kill(larval);
alg = ERR_PTR(-ETIMEDOUT);
- } else if (!alg) {
+ } else if (!alg || PTR_ERR(alg) == -EEXIST) {
+ int err = alg ? -EEXIST : -EAGAIN;
+
+ /*
+ * EEXIST is expected because two probes can be scheduled
+ * at the same time with one using alg_name and the other
+ * using driver_name. Do a re-lookup but do not retry in
+ * case we hit a quirk like gcm_base(ctr(aes),...) which
+ * will never match.
+ */
alg = &larval->alg;
alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
- ERR_PTR(-EAGAIN);
+ ERR_PTR(err);
} else if (IS_ERR(alg))
;
else if (crypto_is_test_larval(larval) &&
@@ -528,6 +536,7 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
goto out;
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+ tfm->fb = tfm;
err = frontend->init_tfm(tfm);
if (err)
@@ -569,7 +578,7 @@ void *crypto_clone_tfm(const struct crypto_type *frontend,
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
tfm->crt_flags = otfm->crt_flags;
- tfm->exit = otfm->exit;
+ tfm->fb = tfm;
out:
return mem;
@@ -707,11 +716,27 @@ void crypto_destroy_alg(struct crypto_alg *alg)
{
if (alg->cra_type && alg->cra_type->destroy)
alg->cra_type->destroy(alg);
-
if (alg->cra_destroy)
alg->cra_destroy(alg);
}
EXPORT_SYMBOL_GPL(crypto_destroy_alg);
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct crypto_async_request *nreq;
+
+ nreq = kmemdup(req, total, gfp);
+ if (!nreq) {
+ req->tfm = tfm->fb;
+ return req;
+ }
+
+ nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(crypto_request_clone);
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 1a4825c97c5a..1608018111d0 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -73,7 +73,7 @@ static void __exit arc4_exit(void)
crypto_unregister_lskcipher(&arc4_alg);
}
-subsys_initcall(arc4_init);
+module_init(arc4_init);
module_exit(arc4_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index bd359d3313c2..faa7900383f6 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -304,7 +304,7 @@ static void __exit aria_fini(void)
crypto_unregister_alg(&aria_alg);
}
-subsys_initcall(aria_init);
+module_init(aria_init);
module_exit(aria_fini);
MODULE_DESCRIPTION("ARIA Cipher Algorithm");
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index bf165d321440..e5b177c8e842 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -163,10 +163,8 @@ static u8 *pkey_pack_u32(u8 *dst, u32 val)
static int software_key_query(const struct kernel_pkey_params *params,
struct kernel_pkey_query *info)
{
- struct crypto_akcipher *tfm;
struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_sig *sig;
u8 *key, *ptr;
int ret, len;
bool issig;
@@ -188,7 +186,11 @@ static int software_key_query(const struct kernel_pkey_params *params,
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
+ memset(info, 0, sizeof(*info));
+
if (issig) {
+ struct crypto_sig *sig;
+
sig = crypto_alloc_sig(alg_name, 0, 0);
if (IS_ERR(sig)) {
ret = PTR_ERR(sig);
@@ -200,9 +202,10 @@ static int software_key_query(const struct kernel_pkey_params *params,
else
ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
if (ret < 0)
- goto error_free_tfm;
+ goto error_free_sig;
len = crypto_sig_keysize(sig);
+ info->key_size = len;
info->max_sig_size = crypto_sig_maxsize(sig);
info->max_data_size = crypto_sig_digestsize(sig);
@@ -211,11 +214,19 @@ static int software_key_query(const struct kernel_pkey_params *params,
info->supported_ops |= KEYCTL_SUPPORTS_SIGN;
if (strcmp(params->encoding, "pkcs1") == 0) {
+ info->max_enc_size = len / BITS_PER_BYTE;
+ info->max_dec_size = len / BITS_PER_BYTE;
+
info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
}
+
+error_free_sig:
+ crypto_free_sig(sig);
} else {
+ struct crypto_akcipher *tfm;
+
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
@@ -227,28 +238,23 @@ static int software_key_query(const struct kernel_pkey_params *params,
else
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret < 0)
- goto error_free_tfm;
+ goto error_free_akcipher;
len = crypto_akcipher_maxsize(tfm);
+ info->key_size = len * BITS_PER_BYTE;
info->max_sig_size = len;
info->max_data_size = len;
+ info->max_enc_size = len;
+ info->max_dec_size = len;
info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
- }
-
- info->key_size = len * 8;
- info->max_enc_size = len;
- info->max_dec_size = len;
-
- ret = 0;
-error_free_tfm:
- if (issig)
- crypto_free_sig(sig);
- else
+error_free_akcipher:
crypto_free_akcipher(tfm);
+ }
+
error_free_key:
kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index ee2fdab42334..2ffe4ae90bea 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -372,10 +372,9 @@ static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen,
/* Empty name string if no material */
if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) {
- buffer = kmalloc(1, GFP_KERNEL);
+ buffer = kzalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- buffer[0] = 0;
goto done;
}
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 3aaf3ab4e360..a723769c8777 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -9,7 +9,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -28,7 +27,6 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
};
struct authenc_request_ctx {
@@ -170,21 +168,6 @@ out:
authenc_request_complete(areq, err);
}
-static int crypto_authenc_copy_assoc(struct aead_request *req)
-{
- struct crypto_aead *authenc = crypto_aead_reqtfm(req);
- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_sync_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
- NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
@@ -203,10 +186,7 @@ static int crypto_authenc_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
- err = crypto_authenc_copy_assoc(req);
- if (err)
- return err;
-
+ memcpy_sglist(req->dst, req->src, req->assoclen);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
}
@@ -303,7 +283,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -315,14 +294,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_skcipher;
-
ctx->auth = auth;
ctx->enc = enc;
- ctx->null = null;
crypto_aead_set_reqsize(
tfm,
@@ -336,8 +309,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
return 0;
-err_free_skcipher:
- crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -349,7 +320,6 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
@@ -451,7 +421,7 @@ static void __exit crypto_authenc_module_exit(void)
crypto_unregister_template(&crypto_authenc_tmpl);
}
-subsys_initcall(crypto_authenc_module_init);
+module_init(crypto_authenc_module_init);
module_exit(crypto_authenc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 2cc933e2f790..d1bf0fda3f2e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -12,7 +12,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -31,7 +30,6 @@ struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -158,20 +156,6 @@ static void crypto_authenc_esn_encrypt_done(void *data, int err)
authenc_esn_request_complete(areq, err);
}
-static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
-{
- struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
- struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_sync_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
@@ -190,10 +174,7 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
- err = crypto_authenc_esn_copy(req, assoclen);
- if (err)
- return err;
-
+ memcpy_sglist(req->dst, req->src, assoclen);
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
}
@@ -277,11 +258,8 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
cryptlen -= authsize;
- if (req->src != dst) {
- err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
- if (err)
- return err;
- }
+ if (req->src != dst)
+ memcpy_sglist(dst, req->src, assoclen + cryptlen);
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);
@@ -317,7 +295,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -329,14 +306,8 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_skcipher;
-
ctx->auth = auth;
ctx->enc = enc;
- ctx->null = null;
ctx->reqoff = 2 * crypto_ahash_digestsize(auth);
@@ -352,8 +323,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
return 0;
-err_free_skcipher:
- crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -365,7 +334,6 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -465,7 +433,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
-subsys_initcall(crypto_authenc_esn_module_init);
+module_init(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
index 04a712ddfb43..60f056217510 100644
--- a/crypto/blake2b_generic.c
+++ b/crypto/blake2b_generic.c
@@ -15,12 +15,12 @@
* More information about BLAKE2 can be found at https://blake2.net.
*/
-#include <linux/unaligned.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
static const u8 blake2b_sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
@@ -111,8 +111,8 @@ static void blake2b_compress_one_generic(struct blake2b_state *S,
#undef G
#undef ROUND
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc)
+static void blake2b_compress_generic(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc)
{
do {
blake2b_increment_counter(state, inc);
@@ -120,17 +120,19 @@ void blake2b_compress_generic(struct blake2b_state *state,
block += BLAKE2B_BLOCK_SIZE;
} while (--nblocks);
}
-EXPORT_SYMBOL(blake2b_compress_generic);
static int crypto_blake2b_update_generic(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic);
+ return crypto_blake2b_update_bo(desc, in, inlen,
+ blake2b_compress_generic);
}
-static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
+static int crypto_blake2b_finup_generic(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out)
{
- return crypto_blake2b_final(desc, out, blake2b_compress_generic);
+ return crypto_blake2b_finup(desc, in, inlen, out,
+ blake2b_compress_generic);
}
#define BLAKE2B_ALG(name, driver_name, digest_size) \
@@ -138,7 +140,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 100, \
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \
+ CRYPTO_AHASH_ALG_BLOCK_ONLY | \
+ CRYPTO_AHASH_ALG_FINAL_NONZERO, \
.base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
@@ -146,8 +150,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
.setkey = crypto_blake2b_setkey, \
.init = crypto_blake2b_init, \
.update = crypto_blake2b_update_generic, \
- .final = crypto_blake2b_final_generic, \
- .descsize = sizeof(struct blake2b_state), \
+ .finup = crypto_blake2b_finup_generic, \
+ .descsize = BLAKE2B_DESC_SIZE, \
+ .statesize = BLAKE2B_STATE_SIZE, \
}
static struct shash_alg blake2b_algs[] = {
@@ -171,7 +176,7 @@ static void __exit blake2b_mod_fini(void)
crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
}
-subsys_initcall(blake2b_mod_init);
+module_init(blake2b_mod_init);
module_exit(blake2b_mod_fini);
MODULE_AUTHOR("David Sterba <kdave@kernel.org>");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 0146bc762c09..f3c5f9b09850 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -124,7 +124,7 @@ static void __exit blowfish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(blowfish_mod_init);
+module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 197fcf3abc89..ee4336a04b93 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1064,7 +1064,7 @@ static void __exit camellia_fini(void)
crypto_unregister_alg(&camellia_alg);
}
-subsys_initcall(camellia_init);
+module_init(camellia_init);
module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index f3e57775fa02..f68330793e0c 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -531,7 +531,7 @@ static void __exit cast5_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(cast5_mod_init);
+module_init(cast5_mod_init);
module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 11b725b12f27..4c08c42646f0 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -271,7 +271,7 @@ static void __exit cast6_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(cast6_mod_init);
+module_init(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index e81918ca68b7..ed3df6246765 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -179,7 +179,7 @@ static void __exit crypto_cbc_module_exit(void)
crypto_unregister_template(&crypto_cbc_tmpl);
}
-subsys_initcall(crypto_cbc_module_init);
+module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 06476b53b491..2ae929ffdef8 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -10,11 +10,12 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
struct ccm_instance_ctx {
struct crypto_skcipher_spawn ctr;
@@ -54,11 +55,6 @@ struct cbcmac_tfm_ctx {
struct crypto_cipher *child;
};
-struct cbcmac_desc_ctx {
- unsigned int len;
- u8 dg[];
-};
-
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
struct aead_request *req)
{
@@ -783,12 +779,10 @@ static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cbcmac_digest_init(struct shash_desc *pdesc)
{
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_digestsize(pdesc->tfm);
+ u8 *dg = shash_desc_ctx(pdesc);
- ctx->len = 0;
- memset(ctx->dg, 0, bs);
-
+ memset(dg, 0, bs);
return 0;
}
@@ -797,39 +791,34 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
-
- while (len > 0) {
- unsigned int l = min(len, bs - ctx->len);
-
- crypto_xor(&ctx->dg[ctx->len], p, l);
- ctx->len +=l;
- len -= l;
- p += l;
-
- if (ctx->len == bs) {
- crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);
- ctx->len = 0;
- }
- }
-
- return 0;
+ u8 *dg = shash_desc_ctx(pdesc);
+
+ do {
+ crypto_xor(dg, p, bs);
+ crypto_cipher_encrypt_one(tfm, dg, dg);
+ p += bs;
+ len -= bs;
+ } while (len >= bs);
+ return len;
}
-static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_cbcmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
+ u8 *dg = shash_desc_ctx(pdesc);
- if (ctx->len)
- crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);
-
- memcpy(out, ctx->dg, bs);
+ if (len) {
+ crypto_xor(dg, src, len);
+ crypto_cipher_encrypt_one(tfm, out, dg);
+ return 0;
+ }
+ memcpy(out, dg, bs);
return 0;
}
@@ -883,19 +872,19 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct cbcmac_desc_ctx) +
- alg->cra_blocksize;
+ inst->alg.descsize = alg->cra_blocksize;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY;
inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx);
inst->alg.base.cra_init = cbcmac_init_tfm;
inst->alg.base.cra_exit = cbcmac_exit_tfm;
inst->alg.init = crypto_cbcmac_digest_init;
inst->alg.update = crypto_cbcmac_digest_update;
- inst->alg.final = crypto_cbcmac_digest_final;
+ inst->alg.finup = crypto_cbcmac_digest_finup;
inst->alg.setkey = crypto_cbcmac_digest_setkey;
inst->free = shash_free_singlespawn_instance;
@@ -940,7 +929,7 @@ static void __exit crypto_ccm_module_exit(void)
ARRAY_SIZE(crypto_ccm_tmpls));
}
-subsys_initcall(crypto_ccm_module_init);
+module_init(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha.c b/crypto/chacha.c
new file mode 100644
index 000000000000..c3a11f4e2d13
--- /dev/null
+++ b/crypto/chacha.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers
+ *
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2018 Google LLC
+ */
+
+#include <linux/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/module.h>
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+static int chacha_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+static int chacha20_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static int chacha12_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+
+static int chacha_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx,
+ const u8 iv[CHACHA_IV_SIZE], bool arch)
+{
+ struct skcipher_walk walk;
+ struct chacha_state state;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ chacha_init(&state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
+
+ if (arch)
+ chacha_crypt(&state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes, ctx->nrounds);
+ else
+ chacha_crypt_generic(&state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes,
+ ctx->nrounds);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int crypto_chacha_crypt_generic(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, false);
+}
+
+static int crypto_chacha_crypt_arch(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, true);
+}
+
+static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ struct chacha_state state;
+ u8 real_iv[16];
+
+ /* Compute the subkey given the original key and first 128 nonce bits */
+ chacha_init(&state, ctx->key, req->iv);
+ if (arch)
+ hchacha_block(&state, subctx.key, ctx->nrounds);
+ else
+ hchacha_block_generic(&state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ /* Build the real IV */
+ memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
+ memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
+
+ /* Generate the stream and XOR it with the data */
+ return chacha_stream_xor(req, &subctx, real_iv, arch);
+}
+
+static int crypto_xchacha_crypt_generic(struct skcipher_request *req)
+{
+ return crypto_xchacha_crypt(req, false);
+}
+
+static int crypto_xchacha_crypt_arch(struct skcipher_request *req)
+{
+ return crypto_xchacha_crypt(req, true);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_chacha_crypt_generic,
+ .decrypt = crypto_chacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt_generic,
+ .decrypt = crypto_xchacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt_generic,
+ .decrypt = crypto_xchacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_chacha_crypt_arch,
+ .decrypt = crypto_chacha_crypt_arch,
+ },
+ {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt_arch,
+ .decrypt = crypto_xchacha_crypt_arch,
+ },
+ {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt_arch,
+ .decrypt = crypto_xchacha_crypt_arch,
+ }
+};
+
+static unsigned int num_algs;
+
+static int __init crypto_chacha_mod_init(void)
+{
+ /* register the arch flavours only if they differ from generic */
+ num_algs = ARRAY_SIZE(algs);
+ BUILD_BUG_ON(ARRAY_SIZE(algs) % 2 != 0);
+ if (!chacha_is_arch_optimized())
+ num_algs /= 2;
+
+ return crypto_register_skciphers(algs, num_algs);
+}
+
+static void __exit crypto_chacha_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, num_algs);
+}
+
+module_init(crypto_chacha_mod_init);
+module_exit(crypto_chacha_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-generic");
+MODULE_ALIAS_CRYPTO("chacha20-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-generic");
+MODULE_ALIAS_CRYPTO("xchacha20-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-generic");
+MODULE_ALIAS_CRYPTO("xchacha12-" __stringify(ARCH));
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index d740849f1c19..b4b5a7198d84 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -12,36 +12,23 @@
#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/string.h>
struct chachapoly_instance_ctx {
struct crypto_skcipher_spawn chacha;
- struct crypto_ahash_spawn poly;
unsigned int saltlen;
};
struct chachapoly_ctx {
struct crypto_skcipher *chacha;
- struct crypto_ahash *poly;
/* key bytes we use for the ChaCha20 IV */
unsigned int saltlen;
u8 salt[] __counted_by(saltlen);
};
-struct poly_req {
- /* zero byte padding for AD/ciphertext, as needed */
- u8 pad[POLY1305_BLOCK_SIZE];
- /* tail data with AD/ciphertext lengths */
- struct {
- __le64 assoclen;
- __le64 cryptlen;
- } tail;
- struct scatterlist src[1];
- struct ahash_request req; /* must be last member */
-};
-
struct chacha_req {
u8 iv[CHACHA_IV_SIZE];
struct scatterlist src[1];
@@ -62,7 +49,6 @@ struct chachapoly_req_ctx {
/* request flags, with MAY_SLEEP cleared if needed */
u32 flags;
union {
- struct poly_req poly;
struct chacha_req chacha;
} u;
};
@@ -105,16 +91,6 @@ static int poly_verify_tag(struct aead_request *req)
return 0;
}
-static int poly_copy_tag(struct aead_request *req)
-{
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
-
- scatterwalk_map_and_copy(rctx->tag, req->dst,
- req->assoclen + rctx->cryptlen,
- sizeof(rctx->tag), 1);
- return 0;
-}
-
static void chacha_decrypt_done(void *data, int err)
{
async_done_continue(data, err, poly_verify_tag);
@@ -151,210 +127,76 @@ skip:
return poly_verify_tag(req);
}
-static int poly_tail_continue(struct aead_request *req)
-{
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
-
- if (rctx->cryptlen == req->cryptlen) /* encrypting */
- return poly_copy_tag(req);
-
- return chacha_decrypt(req);
-}
-
-static void poly_tail_done(void *data, int err)
-{
- async_done_continue(data, err, poly_tail_continue);
-}
-
-static int poly_tail(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
-
- preq->tail.assoclen = cpu_to_le64(rctx->assoclen);
- preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen);
- sg_init_one(preq->src, &preq->tail, sizeof(preq->tail));
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_tail_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src,
- rctx->tag, sizeof(preq->tail));
-
- err = crypto_ahash_finup(&preq->req);
- if (err)
- return err;
-
- return poly_tail_continue(req);
-}
-
-static void poly_cipherpad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_tail);
-}
-
-static int poly_cipherpad(struct aead_request *req)
+static int poly_hash(struct aead_request *req)
{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
+ const void *zp = page_address(ZERO_PAGE(0));
+ struct scatterlist *sg = req->src;
+ struct poly1305_desc_ctx desc;
+ struct scatter_walk walk;
+ struct {
+ union {
+ struct {
+ __le64 assoclen;
+ __le64 cryptlen;
+ };
+ u8 u8[16];
+ };
+ } tail;
unsigned int padlen;
- int err;
-
- padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
- memset(preq->pad, 0, sizeof(preq->pad));
- sg_init_one(preq->src, preq->pad, padlen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_cipherpad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+ unsigned int total;
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_tail(req);
-}
-
-static void poly_cipher_done(void *data, int err)
-{
- async_done_continue(data, err, poly_cipherpad);
-}
-
-static int poly_cipher(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- struct scatterlist *crypt = req->src;
- int err;
+ if (sg != req->dst)
+ memcpy_sglist(req->dst, sg, req->assoclen);
if (rctx->cryptlen == req->cryptlen) /* encrypting */
- crypt = req->dst;
-
- crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_cipher_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
+ sg = req->dst;
- return poly_cipherpad(req);
-}
+ poly1305_init(&desc, rctx->key);
+ scatterwalk_start(&walk, sg);
-static void poly_adpad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_cipher);
-}
+ total = rctx->assoclen;
+ while (total) {
+ unsigned int n = scatterwalk_next(&walk, total);
-static int poly_adpad(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- unsigned int padlen;
- int err;
+ poly1305_update(&desc, walk.addr, n);
+ scatterwalk_done_src(&walk, n);
+ total -= n;
+ }
padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE;
- memset(preq->pad, 0, sizeof(preq->pad));
- sg_init_one(preq->src, preq->pad, padlen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_adpad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_cipher(req);
-}
-
-static void poly_ad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_adpad);
-}
-
-static int poly_ad(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_ad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_adpad(req);
-}
-
-static void poly_setkey_done(void *data, int err)
-{
- async_done_continue(data, err, poly_ad);
-}
+ poly1305_update(&desc, zp, padlen);
-static int poly_setkey(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
+ scatterwalk_skip(&walk, req->assoclen - rctx->assoclen);
- sg_init_one(preq->src, rctx->key, sizeof(rctx->key));
+ total = rctx->cryptlen;
+ while (total) {
+ unsigned int n = scatterwalk_next(&walk, total);
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_setkey_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_ad(req);
-}
-
-static void poly_init_done(void *data, int err)
-{
- async_done_continue(data, err, poly_setkey);
-}
+ poly1305_update(&desc, walk.addr, n);
+ scatterwalk_done_src(&walk, n);
+ total -= n;
+ }
-static int poly_init(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
+ padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
+ poly1305_update(&desc, zp, padlen);
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_init_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
+ tail.assoclen = cpu_to_le64(rctx->assoclen);
+ tail.cryptlen = cpu_to_le64(rctx->cryptlen);
+ poly1305_update(&desc, tail.u8, sizeof(tail));
+ memzero_explicit(&tail, sizeof(tail));
+ poly1305_final(&desc, rctx->tag);
- err = crypto_ahash_init(&preq->req);
- if (err)
- return err;
+ if (rctx->cryptlen != req->cryptlen)
+ return chacha_decrypt(req);
- return poly_setkey(req);
+ memcpy_to_scatterwalk(&walk, rctx->tag, sizeof(rctx->tag));
+ return 0;
}
static void poly_genkey_done(void *data, int err)
{
- async_done_continue(data, err, poly_init);
+ async_done_continue(data, err, poly_hash);
}
static int poly_genkey(struct aead_request *req)
@@ -388,7 +230,7 @@ static int poly_genkey(struct aead_request *req)
if (err)
return err;
- return poly_init(req);
+ return poly_hash(req);
}
static void chacha_encrypt_done(void *data, int err)
@@ -437,14 +279,7 @@ static int chachapoly_encrypt(struct aead_request *req)
/* encrypt call chain:
* - chacha_encrypt/done()
* - poly_genkey/done()
- * - poly_init/done()
- * - poly_setkey/done()
- * - poly_ad/done()
- * - poly_adpad/done()
- * - poly_cipher/done()
- * - poly_cipherpad/done()
- * - poly_tail/done/continue()
- * - poly_copy_tag()
+ * - poly_hash()
*/
return chacha_encrypt(req);
}
@@ -458,13 +293,7 @@ static int chachapoly_decrypt(struct aead_request *req)
/* decrypt call chain:
* - poly_genkey/done()
- * - poly_init/done()
- * - poly_setkey/done()
- * - poly_ad/done()
- * - poly_adpad/done()
- * - poly_cipher/done()
- * - poly_cipherpad/done()
- * - poly_tail/done/continue()
+ * - poly_hash()
* - chacha_decrypt/done()
* - poly_verify_tag()
*/
@@ -503,21 +332,13 @@ static int chachapoly_init(struct crypto_aead *tfm)
struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_skcipher *chacha;
- struct crypto_ahash *poly;
unsigned long align;
- poly = crypto_spawn_ahash(&ictx->poly);
- if (IS_ERR(poly))
- return PTR_ERR(poly);
-
chacha = crypto_spawn_skcipher(&ictx->chacha);
- if (IS_ERR(chacha)) {
- crypto_free_ahash(poly);
+ if (IS_ERR(chacha))
return PTR_ERR(chacha);
- }
ctx->chacha = chacha;
- ctx->poly = poly;
ctx->saltlen = ictx->saltlen;
align = crypto_aead_alignmask(tfm);
@@ -525,12 +346,9 @@ static int chachapoly_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(
tfm,
align + offsetof(struct chachapoly_req_ctx, u) +
- max(offsetof(struct chacha_req, req) +
- sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(chacha),
- offsetof(struct poly_req, req) +
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(poly)));
+ offsetof(struct chacha_req, req) +
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(chacha));
return 0;
}
@@ -539,7 +357,6 @@ static void chachapoly_exit(struct crypto_aead *tfm)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_free_ahash(ctx->poly);
crypto_free_skcipher(ctx->chacha);
}
@@ -548,7 +365,6 @@ static void chachapoly_free(struct aead_instance *inst)
struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->chacha);
- crypto_drop_ahash(&ctx->poly);
kfree(inst);
}
@@ -559,7 +375,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
struct aead_instance *inst;
struct chachapoly_instance_ctx *ctx;
struct skcipher_alg_common *chacha;
- struct hash_alg_common *poly;
int err;
if (ivsize > CHACHAPOLY_IV_SIZE)
@@ -581,14 +396,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;
chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha);
- err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst),
- crypto_attr_alg_name(tb[2]), 0, mask);
- if (err)
- goto err_free_inst;
- poly = crypto_spawn_ahash_alg(&ctx->poly);
-
err = -EINVAL;
- if (poly->digestsize != POLY1305_DIGEST_SIZE)
+ if (strcmp(crypto_attr_alg_name(tb[2]), "poly1305") &&
+ strcmp(crypto_attr_alg_name(tb[2]), "poly1305-generic"))
goto err_free_inst;
/* Need 16-byte IV size, including Initial Block Counter value */
if (chacha->ivsize != CHACHA_IV_SIZE)
@@ -599,16 +409,15 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->base.cra_name,
- poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,poly1305)", name,
+ chacha->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->base.cra_driver_name,
- poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,poly1305-generic)", name,
+ chacha->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_priority = (chacha->base.cra_priority +
- poly->base.cra_priority) / 2;
+ inst->alg.base.cra_priority = chacha->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = chacha->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
@@ -667,7 +476,7 @@ static void __exit chacha20poly1305_module_exit(void)
ARRAY_SIZE(rfc7539_tmpls));
}
-subsys_initcall(chacha20poly1305_module_init);
+module_init(chacha20poly1305_module_init);
module_exit(chacha20poly1305_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
deleted file mode 100644
index 1fb9fbd302c6..000000000000
--- a/crypto/chacha_generic.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2015 Martin Willi
- * Copyright (C) 2018 Google LLC
- */
-
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/module.h>
-
-static int chacha_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
-
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int crypto_chacha_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_stream_xor(req, ctx, req->iv);
-}
-
-static int crypto_xchacha_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- /* Compute the subkey given the original key and first 128 nonce bits */
- chacha_init(state, ctx->key, req->iv);
- hchacha_block_generic(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- /* Build the real IV */
- memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
- memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
-
- /* Generate the stream and XOR it with the data */
- return chacha_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = crypto_chacha_crypt,
- .decrypt = crypto_chacha_crypt,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = crypto_xchacha_crypt,
- .decrypt = crypto_xchacha_crypt,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = crypto_xchacha_crypt,
- .decrypt = crypto_xchacha_crypt,
- }
-};
-
-static int __init chacha_generic_mod_init(void)
-{
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit chacha_generic_mod_fini(void)
-{
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-subsys_initcall(chacha_generic_mod_init);
-module_exit(chacha_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-generic");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-generic");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-generic");
diff --git a/crypto/cmac.c b/crypto/cmac.c
index c66a0f4d8808..1b03964abe00 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -13,9 +13,12 @@
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
/*
* +------------------------
@@ -31,22 +34,6 @@ struct cmac_tfm_ctx {
__be64 consts[];
};
-/*
- * +------------------------
- * | <shash desc>
- * +------------------------
- * | cmac_desc_ctx
- * +------------------------
- * | odds (block size)
- * +------------------------
- * | prev (block size)
- * +------------------------
- */
-struct cmac_desc_ctx {
- unsigned int len;
- u8 odds[];
-};
-
static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
@@ -102,13 +89,10 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cmac_digest_init(struct shash_desc *pdesc)
{
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = &ctx->odds[bs];
+ u8 *prev = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(prev, 0, bs);
-
return 0;
}
@@ -117,77 +101,36 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(odds + ctx->len, p, len);
- ctx->len += len;
- return 0;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- crypto_xor(prev, odds, bs);
- crypto_cipher_encrypt_one(tfm, prev, prev);
+ u8 *prev = shash_desc_ctx(pdesc);
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
+ do {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(odds, p, len);
- ctx->len = len;
- }
-
- return 0;
+ } while (len >= bs);
+ return len;
}
-static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_cmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
unsigned int offset = 0;
- if (ctx->len != bs) {
- unsigned int rlen;
- u8 *p = odds + ctx->len;
-
- *p = 0x80;
- p++;
-
- rlen = bs - ctx->len - 1;
- if (rlen)
- memset(p, 0, rlen);
-
+ crypto_xor(prev, src, len);
+ if (len != bs) {
+ prev[len] ^= 0x80;
offset += bs;
}
-
- crypto_xor(prev, odds, bs);
crypto_xor(prev, (const u8 *)tctx->consts + offset, bs);
-
crypto_cipher_encrypt_one(tfm, out, prev);
-
return 0;
}
@@ -269,13 +212,14 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) +
alg->cra_blocksize * 2;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct cmac_desc_ctx) +
- alg->cra_blocksize * 2;
+ inst->alg.descsize = alg->cra_blocksize;
inst->alg.init = crypto_cmac_digest_init;
inst->alg.update = crypto_cmac_digest_update;
- inst->alg.final = crypto_cmac_digest_final;
+ inst->alg.finup = crypto_cmac_digest_finup;
inst->alg.setkey = crypto_cmac_digest_setkey;
inst->alg.init_tfm = cmac_init_tfm;
inst->alg.clone_tfm = cmac_clone_tfm;
@@ -307,7 +251,7 @@ static void __exit crypto_cmac_module_exit(void)
crypto_unregister_template(&crypto_cmac_tmpl);
}
-subsys_initcall(crypto_cmac_module_init);
+module_init(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/crc32_generic.c b/crypto/crc32.c
index 783a30b27398..cc371d42601f 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32.c
@@ -172,7 +172,7 @@ static void __exit crc32_mod_fini(void)
crypto_unregister_shashes(algs, num_algs);
}
-subsys_initcall(crc32_mod_init);
+module_init(crc32_mod_init);
module_exit(crc32_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c.c
index b1a36d32dc50..e5377898414a 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c.c
@@ -212,7 +212,7 @@ static void __exit crc32c_mod_fini(void)
crypto_unregister_shashes(algs, num_algs);
}
-subsys_initcall(crc32c_mod_init);
+module_init(crc32c_mod_init);
module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 31d022d47f7a..5bb6f8d88cc2 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -1138,7 +1138,7 @@ static void __exit cryptd_exit(void)
crypto_unregister_template(&cryptd_tmpl);
}
-subsys_initcall(cryptd_init);
+module_init(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index c7c16da5e649..445d3c113ee1 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -23,9 +23,6 @@
#define CRYPTO_ENGINE_MAX_QLEN 10
-/* Temporary algorithm flag used to indicate an updated driver. */
-#define CRYPTO_ALG_ENGINE 0x200
-
struct crypto_engine_alg {
struct crypto_alg base;
struct crypto_engine_op op;
@@ -148,16 +145,9 @@ start_request:
}
}
- if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
- alg = container_of(async_req->tfm->__crt_alg,
- struct crypto_engine_alg, base);
- op = &alg->op;
- } else {
- dev_err(engine->dev, "failed to do request\n");
- ret = -EINVAL;
- goto req_err_1;
- }
-
+ alg = container_of(async_req->tfm->__crt_alg,
+ struct crypto_engine_alg, base);
+ op = &alg->op;
ret = op->do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
@@ -569,9 +559,6 @@ int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
@@ -614,9 +601,6 @@ int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
@@ -660,9 +644,6 @@ int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
@@ -677,9 +658,6 @@ int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
@@ -694,9 +672,6 @@ int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
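
The CRYPTO_ALG_ENGINE marker is no longer needed because every algorithm that reaches the engine is registered through crypto_engine_register_*(), so its crypto_alg is always embedded in a crypto_engine_alg and the container_of() above is unconditionally valid. A hedged sketch of the driver side (the my_* names are hypothetical):

	static int my_do_one_request(struct crypto_engine *engine, void *areq)
	{
		/* program the hardware with the request behind areq; return 0
		 * once queued and finish it later from the completion IRQ via
		 * crypto_finalize_skcipher_request() */
		return 0;
	}

	static struct skcipher_engine_alg my_alg = {
		.base			= { /* usual skcipher_alg fields */ },
		.op.do_one_request	= my_do_one_request,
	};

	/* in probe: crypto_engine_register_skcipher(&my_alg); */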
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index ced90f88ee07..34588f39fdfc 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -15,15 +15,11 @@
#include <crypto/null.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
-static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock);
-static struct crypto_sync_skcipher *crypto_default_null_skcipher;
-static int crypto_default_null_skcipher_refcnt;
-
static int null_init(struct shash_desc *desc)
{
return 0;
@@ -65,19 +61,9 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
static int null_skcipher_crypt(struct skcipher_request *req)
{
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr != walk.dst.virt.addr)
- memcpy(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src, req->cryptlen);
+ return 0;
}
static struct shash_alg digest_null = {
@@ -129,54 +115,6 @@ static struct crypto_alg cipher_null = {
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
-{
- struct crypto_sync_skcipher *ntfm = NULL;
- struct crypto_sync_skcipher *tfm;
-
- spin_lock_bh(&crypto_default_null_skcipher_lock);
- tfm = crypto_default_null_skcipher;
-
- if (!tfm) {
- spin_unlock_bh(&crypto_default_null_skcipher_lock);
-
- ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
- if (IS_ERR(ntfm))
- return ntfm;
-
- spin_lock_bh(&crypto_default_null_skcipher_lock);
- tfm = crypto_default_null_skcipher;
- if (!tfm) {
- tfm = ntfm;
- ntfm = NULL;
- crypto_default_null_skcipher = tfm;
- }
- }
-
- crypto_default_null_skcipher_refcnt++;
- spin_unlock_bh(&crypto_default_null_skcipher_lock);
-
- crypto_free_sync_skcipher(ntfm);
-
- return tfm;
-}
-EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
-
-void crypto_put_default_null_skcipher(void)
-{
- struct crypto_sync_skcipher *tfm = NULL;
-
- spin_lock_bh(&crypto_default_null_skcipher_lock);
- if (!--crypto_default_null_skcipher_refcnt) {
- tfm = crypto_default_null_skcipher;
- crypto_default_null_skcipher = NULL;
- }
- spin_unlock_bh(&crypto_default_null_skcipher_lock);
-
- crypto_free_sync_skcipher(tfm);
-}
-EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
-
static int __init crypto_null_mod_init(void)
{
int ret = 0;
@@ -210,7 +148,7 @@ static void __exit crypto_null_mod_fini(void)
crypto_unregister_skcipher(&skcipher_null);
}
-subsys_initcall(crypto_null_mod_init);
+module_init(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 97a947b0a876..a388f0ceb3a0 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -350,7 +350,7 @@ static void __exit crypto_ctr_module_exit(void)
ARRAY_SIZE(crypto_ctr_tmpls));
}
-subsys_initcall(crypto_ctr_module_init);
+module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/cts.c b/crypto/cts.c
index f5b42156b6c7..48898d5e24ff 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -402,7 +402,7 @@ static void __exit crypto_cts_module_exit(void)
crypto_unregister_template(&crypto_cts_tmpl);
}
-subsys_initcall(crypto_cts_module_init);
+module_init(crypto_cts_module_init);
module_exit(crypto_cts_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c
index 68a673262e04..f3e56e73c66c 100644
--- a/crypto/curve25519-generic.c
+++ b/crypto/curve25519-generic.c
@@ -82,7 +82,7 @@ static void __exit curve25519_exit(void)
crypto_unregister_kpp(&curve25519_alg);
}
-subsys_initcall(curve25519_init);
+module_init(curve25519_init);
module_exit(curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 5c346c544093..fe8e4ad0fee1 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -6,253 +6,250 @@
* by IPCOMP (RFC 3173 & RFC 2394).
*
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- *
- * The default winbits of 11 should suit most packets, and it may be something
- * to configure on a per-tfm basis in the future.
- *
- * Currently, compression history is not maintained between tfm calls, as
- * it is not needed for IPCOMP and keeps the code simpler. It can be
- * implemented if someone wants it.
+ * Copyright (c) 2023 Google, LLC. <ardb@kernel.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
*/
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/crypto.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
-struct deflate_ctx {
- struct z_stream_s comp_stream;
- struct z_stream_s decomp_stream;
+struct deflate_stream {
+ struct z_stream_s stream;
+ u8 workspace[];
};
-static int deflate_comp_init(struct deflate_ctx *ctx)
-{
- int ret = 0;
- struct z_stream_s *stream = &ctx->comp_stream;
-
- stream->workspace = vzalloc(zlib_deflate_workspacesize(
- -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL));
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
- Z_DEFAULT_STRATEGY);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+static DEFINE_MUTEX(deflate_stream_lock);
-static int deflate_decomp_init(struct deflate_ctx *ctx)
+static void *deflate_alloc_stream(void)
{
- int ret = 0;
- struct z_stream_s *stream = &ctx->decomp_stream;
+ size_t size = max(zlib_inflate_workspacesize(),
+ zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS,
+ DEFLATE_DEF_MEMLEVEL));
+ struct deflate_stream *ctx;
- stream->workspace = vzalloc(zlib_inflate_workspacesize());
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+ ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
-static void deflate_comp_exit(struct deflate_ctx *ctx)
-{
- zlib_deflateEnd(&ctx->comp_stream);
- vfree(ctx->comp_stream.workspace);
-}
+ ctx->stream.workspace = ctx->workspace;
-static void deflate_decomp_exit(struct deflate_ctx *ctx)
-{
- zlib_inflateEnd(&ctx->decomp_stream);
- vfree(ctx->decomp_stream.workspace);
+ return ctx;
}
-static int __deflate_init(void *ctx)
+static struct crypto_acomp_streams deflate_streams = {
+ .alloc_ctx = deflate_alloc_stream,
+ .cfree_ctx = kvfree,
+};
+
+static int deflate_compress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
+ struct z_stream_s *stream = &ds->stream;
+ struct acomp_walk walk;
int ret;
- ret = deflate_comp_init(ctx);
+ ret = acomp_walk_virt(&walk, req, true);
if (ret)
- goto out;
- ret = deflate_decomp_init(ctx);
- if (ret)
- deflate_comp_exit(ctx);
-out:
- return ret;
-}
+ return ret;
-static void *deflate_alloc_ctx(void)
-{
- struct deflate_ctx *ctx;
- int ret;
+ do {
+ unsigned int dcur;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur)
+ return -ENOSPC;
- ret = __deflate_init(ctx);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
- }
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
- return ctx;
-}
+ do {
+ int flush = Z_FINISH;
+ unsigned int scur;
-static void __deflate_exit(void *ctx)
-{
- deflate_comp_exit(ctx);
- deflate_decomp_exit(ctx);
-}
+ stream->avail_in = 0;
+ stream->next_in = NULL;
-static void deflate_free_ctx(void *ctx)
-{
- __deflate_exit(ctx);
- kfree_sensitive(ctx);
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ if (acomp_walk_more_src(&walk, scur))
+ flush = Z_NO_FLUSH;
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ ret = zlib_deflate(stream, flush);
+
+ if (scur) {
+ scur -= stream->avail_in;
+ acomp_walk_done_src(&walk, scur);
+ }
+ } while (ret == Z_OK && stream->avail_out);
+
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_compress(struct acomp_req *req)
{
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->comp_stream;
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ int err;
+
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_deflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
+ err = deflate_compress_one(req, ds);
- ret = zlib_deflate(stream, Z_FINISH);
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
+static int deflate_decompress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
- return __deflate_compress(src, slen, dst, dlen, ctx);
+ struct z_stream_s *stream = &ds->stream;
+ bool out_of_space = false;
+ struct acomp_walk walk;
+ int ret;
+
+ ret = acomp_walk_virt(&walk, req, true);
+ if (ret)
+ return ret;
+
+ do {
+ unsigned int scur;
+
+ stream->avail_in = 0;
+ stream->next_in = NULL;
+
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ do {
+ unsigned int dcur;
+
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur) {
+ out_of_space = true;
+ break;
+ }
+
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
+
+ ret = zlib_inflate(stream, Z_NO_FLUSH);
+
+ dcur -= stream->avail_out;
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK && stream->avail_in);
+
+ if (scur)
+ acomp_walk_done_src(&walk, scur);
+
+ if (out_of_space)
+ return -ENOSPC;
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_decompress(struct acomp_req *req)
{
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ int err;
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->decomp_stream;
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_inflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
-
- ret = zlib_inflate(stream, Z_SYNC_FLUSH);
- /*
- * Work around a bug in zlib, which sometimes wants to taste an extra
- * byte when being used in the (undocumented) raw deflate mode.
- * (From USAGI).
- */
- if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
- u8 zerostuff = 0;
- stream->next_in = &zerostuff;
- stream->avail_in = 1;
- ret = zlib_inflate(stream, Z_FINISH);
- }
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
+ err = deflate_decompress_one(req, ds);
+
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
+static int deflate_init(struct crypto_acomp *tfm)
{
- return __deflate_decompress(src, slen, dst, dlen, ctx);
+ int ret;
+
+ mutex_lock(&deflate_stream_lock);
+ ret = crypto_acomp_alloc_streams(&deflate_streams);
+ mutex_unlock(&deflate_stream_lock);
+
+ return ret;
}
-static struct scomp_alg scomp = {
- .alloc_ctx = deflate_alloc_ctx,
- .free_ctx = deflate_free_ctx,
- .compress = deflate_scompress,
- .decompress = deflate_sdecompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp",
- .cra_module = THIS_MODULE,
- }
+static struct acomp_alg acomp = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .init = deflate_init,
+ .base.cra_name = "deflate",
+ .base.cra_driver_name = "deflate-generic",
+ .base.cra_flags = CRYPTO_ALG_REQ_VIRT,
+ .base.cra_module = THIS_MODULE,
};
static int __init deflate_mod_init(void)
{
- return crypto_register_scomp(&scomp);
+ return crypto_register_acomp(&acomp);
}
static void __exit deflate_mod_fini(void)
{
- crypto_unregister_scomp(&scomp);
+ crypto_unregister_acomp(&acomp);
+ crypto_acomp_free_streams(&deflate_streams);
}
-subsys_initcall(deflate_mod_init);
+module_init(deflate_mod_init);
module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>");
MODULE_ALIAS_CRYPTO("deflate");
MODULE_ALIAS_CRYPTO("deflate-generic");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 1274e18d3eb9..fce341400914 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -122,7 +122,7 @@ static void __exit des_generic_mod_fini(void)
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
}
-subsys_initcall(des_generic_mod_init);
+module_init(des_generic_mod_init);
module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/dh.c b/crypto/dh.c
index afc0fd847761..8250eeeebd0f 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -920,7 +920,7 @@ static void __exit dh_exit(void)
crypto_unregister_kpp(&dh);
}
-subsys_initcall(dh_init);
+module_init(dh_init);
module_exit(dh_exit);
MODULE_ALIAS_CRYPTO("dh");
MODULE_LICENSE("GPL");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index f28dfc2511a2..dbe4c8bb5ceb 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -2132,7 +2132,7 @@ static void __exit drbg_exit(void)
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
-subsys_initcall(drbg_init);
+module_init(drbg_init);
module_exit(drbg_exit);
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 95d7e972865a..cd1b20456dad 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -219,7 +219,7 @@ static void __exit crypto_ecb_module_exit(void)
crypto_unregister_template(&crypto_ecb_tmpl);
}
-subsys_initcall(crypto_ecb_module_init);
+module_init(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 72cfd1590156..9f0b93b3166d 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -240,7 +240,7 @@ static void __exit ecdh_exit(void)
crypto_unregister_kpp(&ecdh_nist_p384);
}
-subsys_initcall(ecdh_init);
+module_init(ecdh_init);
module_exit(ecdh_exit);
MODULE_ALIAS_CRYPTO("ecdh");
MODULE_LICENSE("GPL");
diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c
index 4454f1f8f33f..e0c55c64711c 100644
--- a/crypto/ecdsa-p1363.c
+++ b/crypto/ecdsa-p1363.c
@@ -21,7 +21,8 @@ static int ecdsa_p1363_verify(struct crypto_sig *tfm,
const void *digest, unsigned int dlen)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
- unsigned int keylen = crypto_sig_keysize(ctx->child);
+ unsigned int keylen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64));
struct ecdsa_raw_sig sig;
@@ -45,7 +46,8 @@ static unsigned int ecdsa_p1363_max_size(struct crypto_sig *tfm)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
- return 2 * crypto_sig_keysize(ctx->child);
+ return 2 * DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
}
static unsigned int ecdsa_p1363_digest_size(struct crypto_sig *tfm)
diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c
index 90a04f4b9a2f..ee71594d10a0 100644
--- a/crypto/ecdsa-x962.c
+++ b/crypto/ecdsa-x962.c
@@ -82,7 +82,7 @@ static int ecdsa_x962_verify(struct crypto_sig *tfm,
int err;
sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
- sizeof(u64));
+ sizeof(u64) * BITS_PER_BYTE);
err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen);
if (err < 0)
@@ -103,7 +103,8 @@ static unsigned int ecdsa_x962_max_size(struct crypto_sig *tfm)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
struct sig_alg *alg = crypto_sig_alg(ctx->child);
- int slen = crypto_sig_keysize(ctx->child);
+ int slen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
/*
* Verify takes ECDSA-Sig-Value (described in RFC 5480) as input,
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
index 117526d15dde..ce8e4364842f 100644
--- a/crypto/ecdsa.c
+++ b/crypto/ecdsa.c
@@ -167,7 +167,7 @@ static unsigned int ecdsa_key_size(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
- return DIV_ROUND_UP(ctx->curve->nbits, 8);
+ return ctx->curve->nbits;
}
static unsigned int ecdsa_digest_size(struct crypto_sig *tfm)
@@ -334,7 +334,7 @@ static void __exit ecdsa_exit(void)
crypto_unregister_sig(&ecdsa_nist_p521);
}
-subsys_initcall(ecdsa_init);
+module_init(ecdsa_init);
module_exit(ecdsa_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 69686668625e..e0a2d3209938 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -32,7 +32,6 @@ static int echainiv_encrypt(struct aead_request *req)
u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
- int err;
if (req->cryptlen < ivsize)
return -EINVAL;
@@ -41,20 +40,9 @@ static int echainiv_encrypt(struct aead_request *req)
info = req->iv;
- if (req->src != req->dst) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
-
- skcipher_request_set_sync_tfm(nreq, ctx->sknull);
- skcipher_request_set_callback(nreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst,
- req->assoclen + req->cryptlen,
- NULL);
-
- err = crypto_skcipher_encrypt(nreq);
- if (err)
- return err;
- }
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src,
+ req->assoclen + req->cryptlen);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
@@ -157,7 +145,7 @@ static void __exit echainiv_module_exit(void)
crypto_unregister_template(&echainiv_tmpl);
}
-subsys_initcall(echainiv_module_init);
+module_init(echainiv_module_init);
module_exit(echainiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
index b3dd8a3ddeb7..2c0602f0cd40 100644
--- a/crypto/ecrdsa.c
+++ b/crypto/ecrdsa.c
@@ -249,7 +249,7 @@ static unsigned int ecrdsa_key_size(struct crypto_sig *tfm)
* Verify doesn't need any output, so it's just informational
* for keyctl to determine the key bit size.
*/
- return ctx->pub_key.ndigits * sizeof(u64);
+ return ctx->pub_key.ndigits * sizeof(u64) * BITS_PER_BYTE;
}
static unsigned int ecrdsa_max_size(struct crypto_sig *tfm)
diff --git a/crypto/essiv.c b/crypto/essiv.c
index ec0ec8992c2d..d003b78fcd85 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -548,8 +548,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
}
/* record the driver name so we can instantiate this exact algo later */
- strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME);
+ strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name);
/* Instance fields */
@@ -642,7 +641,7 @@ static void __exit essiv_module_exit(void)
crypto_unregister_template(&essiv_tmpl);
}
-subsys_initcall(essiv_module_init);
+module_init(essiv_module_init);
module_exit(essiv_module_exit);
MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption");
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 95a16e88899b..80036835cec5 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -411,7 +411,7 @@ static void __exit fcrypt_mod_fini(void)
crypto_unregister_alg(&fcrypt_alg);
}
-subsys_initcall(fcrypt_mod_init);
+module_init(fcrypt_mod_init);
module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/fips.c b/crypto/fips.c
index 2fa3a9ee61a1..e88a604cb42b 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -95,5 +95,5 @@ static void __exit fips_exit(void)
crypto_proc_fips_exit();
}
-subsys_initcall(fips_init);
+module_init(fips_init);
module_exit(fips_exit);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 84f7c23d14e4..97716482bed0 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -9,7 +9,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include <crypto/hash.h>
@@ -46,7 +45,6 @@ struct crypto_rfc4543_instance_ctx {
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
- struct crypto_sync_skcipher *null;
u8 nonce[4];
};
@@ -79,8 +77,6 @@ static struct {
struct scatterlist sg;
} *gcm_zeroes;
-static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
-
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
@@ -930,12 +926,12 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
unsigned int authsize = crypto_aead_authsize(aead);
u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
crypto_aead_alignmask(ctx->child) + 1);
- int err;
if (req->src != req->dst) {
- err = crypto_rfc4543_copy_src_to_dst(req, enc);
- if (err)
- return err;
+ unsigned int nbytes = req->assoclen + req->cryptlen -
+ (enc ? 0 : authsize);
+
+ memcpy_sglist(req->dst, req->src, nbytes);
}
memcpy(iv, ctx->nonce, 4);
@@ -952,22 +948,6 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}
-static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
- unsigned int authsize = crypto_aead_authsize(aead);
- unsigned int nbytes = req->assoclen + req->cryptlen -
- (enc ? 0 : authsize);
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
-
- skcipher_request_set_sync_tfm(nreq, ctx->null);
- skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
-
- return crypto_skcipher_encrypt(nreq);
-}
-
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ?:
@@ -987,21 +967,13 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
struct crypto_aead_spawn *spawn = &ictx->aead;
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
- struct crypto_sync_skcipher *null;
unsigned long align;
- int err = 0;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_aead;
-
ctx->child = aead;
- ctx->null = null;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
@@ -1012,10 +984,6 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
align + GCM_AES_IV_SIZE);
return 0;
-
-err_free_aead:
- crypto_free_aead(aead);
- return err;
}
static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
@@ -1023,7 +991,6 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
@@ -1152,7 +1119,7 @@ static void __exit crypto_gcm_module_exit(void)
ARRAY_SIZE(crypto_gcm_tmpls));
}
-subsys_initcall(crypto_gcm_module_init);
+module_init(crypto_gcm_module_init);
module_exit(crypto_gcm_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/geniv.c b/crypto/geniv.c
index bee4621b4f12..42eff6a7387c 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -9,7 +9,6 @@
#include <crypto/internal/geniv.h>
#include <crypto/internal/rng.h>
-#include <crypto/null.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -125,15 +124,10 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->sknull = crypto_get_default_null_skcipher();
- err = PTR_ERR(ctx->sknull);
- if (IS_ERR(ctx->sknull))
- goto out;
-
child = crypto_spawn_aead(aead_instance_ctx(inst));
err = PTR_ERR(child);
if (IS_ERR(child))
- goto drop_null;
+ goto out;
ctx->child = child;
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
@@ -143,10 +137,6 @@ int aead_init_geniv(struct crypto_aead *aead)
out:
return err;
-
-drop_null:
- crypto_put_default_null_skcipher();
- goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -155,7 +145,6 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index c70d163c1ac9..e5803c249c12 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -34,14 +34,14 @@
* (https://csrc.nist.gov/publications/detail/sp/800-38d/final)
*/
-#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/utils.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
static int ghash_init(struct shash_desc *desc)
{
@@ -82,59 +82,36 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- int n = min(srclen, dctx->bytes);
- u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
-
- if (!dctx->bytes)
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- while (srclen >= GHASH_BLOCK_SIZE) {
+ do {
crypto_xor(dst, src, GHASH_BLOCK_SIZE);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
src += GHASH_BLOCK_SIZE;
srclen -= GHASH_BLOCK_SIZE;
- }
-
- if (srclen) {
- dctx->bytes = GHASH_BLOCK_SIZE - srclen;
- while (srclen--)
- *dst++ ^= *src++;
- }
+ } while (srclen >= GHASH_BLOCK_SIZE);
- return 0;
+ return srclen;
}
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static void ghash_flush(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
{
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- while (dctx->bytes--)
- *tmp++ ^= 0;
-
+ if (len) {
+ crypto_xor(dst, src, len);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
-
- dctx->bytes = 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- ghash_flush(ctx, dctx);
+ ghash_flush(desc, src, len);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
@@ -151,13 +128,14 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@@ -175,7 +153,7 @@ static void __exit ghash_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-subsys_initcall(ghash_mod_init);
+module_init(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/hctr2.c b/crypto/hctr2.c
index cbcd673be481..c8932777bba8 100644
--- a/crypto/hctr2.c
+++ b/crypto/hctr2.c
@@ -570,7 +570,7 @@ static void __exit hctr2_module_exit(void)
ARRAY_SIZE(hctr2_tmpls));
}
-subsys_initcall(hctr2_module_init);
+module_init(hctr2_module_init);
module_exit(hctr2_module_exit);
MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode");
diff --git a/crypto/hkdf.c b/crypto/hkdf.c
index 2434c5c42545..f24c2a8d4df9 100644
--- a/crypto/hkdf.c
+++ b/crypto/hkdf.c
@@ -543,7 +543,7 @@ static int __init crypto_hkdf_module_init(void)
{
int ret = 0, i;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return 0;
for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) {
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 7cec25ff9889..148af460ae97 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -13,13 +13,11 @@
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/fips.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
struct hmac_ctx {
@@ -28,6 +26,12 @@ struct hmac_ctx {
u8 pads[];
};
+struct ahash_hmac_ctx {
+ struct crypto_ahash *hash;
+ /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */
+ u8 pads[];
+};
+
static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
@@ -39,7 +43,7 @@ static int hmac_setkey(struct crypto_shash *parent,
u8 *ipad = &tctx->pads[0];
u8 *opad = &tctx->pads[ss];
SHASH_DESC_ON_STACK(shash, hash);
- unsigned int i;
+ int err, i;
if (fips_enabled && (keylen < 112 / 8))
return -EINVAL;
@@ -65,12 +69,14 @@ static int hmac_setkey(struct crypto_shash *parent,
opad[i] ^= HMAC_OPAD_VALUE;
}
- return crypto_shash_init(shash) ?:
- crypto_shash_update(shash, ipad, bs) ?:
- crypto_shash_export(shash, ipad) ?:
- crypto_shash_init(shash) ?:
- crypto_shash_update(shash, opad, bs) ?:
- crypto_shash_export(shash, opad);
+ err = crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, ipad, bs) ?:
+ crypto_shash_export(shash, ipad) ?:
+ crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, opad, bs) ?:
+ crypto_shash_export(shash, opad);
+ shash_desc_zero(shash);
+ return err;
}
static int hmac_export(struct shash_desc *pdesc, void *out)
@@ -90,6 +96,22 @@ static int hmac_import(struct shash_desc *pdesc, const void *in)
return crypto_shash_import(desc, in);
}
+static int hmac_export_core(struct shash_desc *pdesc, void *out)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ return crypto_shash_export_core(desc, out);
+}
+
+static int hmac_import_core(struct shash_desc *pdesc, const void *in)
+{
+ const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ desc->tfm = tctx->hash;
+ return crypto_shash_import_core(desc, in);
+}
+
static int hmac_init(struct shash_desc *pdesc)
{
const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
@@ -105,20 +127,6 @@ static int hmac_update(struct shash_desc *pdesc,
return crypto_shash_update(desc, data, nbytes);
}
-static int hmac_final(struct shash_desc *pdesc, u8 *out)
-{
- struct crypto_shash *parent = pdesc->tfm;
- int ds = crypto_shash_digestsize(parent);
- int ss = crypto_shash_statesize(parent);
- const struct hmac_ctx *tctx = crypto_shash_ctx(parent);
- const u8 *opad = &tctx->pads[ss];
- struct shash_desc *desc = shash_desc_ctx(pdesc);
-
- return crypto_shash_final(desc, out) ?:
- crypto_shash_import(desc, opad) ?:
- crypto_shash_finup(desc, out, ds, out);
-}
-
static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
unsigned int nbytes, u8 *out)
{
@@ -146,9 +154,6 @@ static int hmac_init_tfm(struct crypto_shash *parent)
if (IS_ERR(hash))
return PTR_ERR(hash);
- parent->descsize = sizeof(struct shash_desc) +
- crypto_shash_descsize(hash);
-
tctx->hash = hash;
return 0;
}
@@ -174,26 +179,23 @@ static void hmac_exit_tfm(struct crypto_shash *parent)
crypto_free_shash(tctx->hash);
}
-static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+static int __hmac_create_shash(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 mask)
{
struct shash_instance *inst;
struct crypto_shash_spawn *spawn;
struct crypto_alg *alg;
struct shash_alg *salg;
- u32 mask;
int err;
int ds;
int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
- if (err)
- return err;
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
+ mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
@@ -212,7 +214,8 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
ss < alg->cra_blocksize)
goto err_free_inst;
- err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
+ err = crypto_inst_setname(shash_crypto_instance(inst), "hmac",
+ "hmac-shash", alg);
if (err)
goto err_free_inst;
@@ -222,12 +225,14 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.digestsize = ds;
inst->alg.statesize = ss;
+ inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize;
inst->alg.init = hmac_init;
inst->alg.update = hmac_update;
- inst->alg.final = hmac_final;
inst->alg.finup = hmac_finup;
inst->alg.export = hmac_export;
inst->alg.import = hmac_import;
+ inst->alg.export_core = hmac_export_core;
+ inst->alg.import_core = hmac_import_core;
inst->alg.setkey = hmac_setkey;
inst->alg.init_tfm = hmac_init_tfm;
inst->alg.clone_tfm = hmac_clone_tfm;
@@ -243,23 +248,332 @@ err_free_inst:
return err;
}
-static struct crypto_template hmac_tmpl = {
- .name = "hmac",
- .create = hmac_create,
- .module = THIS_MODULE,
+static int hmac_setkey_ahash(struct crypto_ahash *parent,
+ const u8 *inkey, unsigned int keylen)
+{
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+ struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash);
+ int ds = crypto_ahash_digestsize(parent);
+ int bs = crypto_ahash_blocksize(parent);
+ int ss = crypto_ahash_statesize(parent);
+ HASH_REQUEST_ON_STACK(req, fb);
+ u8 *opad = &tctx->pads[ss];
+ u8 *ipad = &tctx->pads[0];
+ int err, i;
+
+ if (fips_enabled && (keylen < 112 / 8))
+ return -EINVAL;
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+
+ if (keylen > bs) {
+ ahash_request_set_virt(req, inkey, ipad, keylen);
+ err = crypto_ahash_digest(req);
+ if (err)
+ goto out_zero_req;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, inkey, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= HMAC_IPAD_VALUE;
+ opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ ahash_request_set_virt(req, ipad, NULL, bs);
+ err = crypto_ahash_init(req) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, ipad);
+
+ ahash_request_set_virt(req, opad, NULL, bs);
+ err = err ?:
+ crypto_ahash_init(req) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, opad);
+
+out_zero_req:
+ HASH_REQUEST_ZERO(req);
+ return err;
+}
+
+static int hmac_export_ahash(struct ahash_request *preq, void *out)
+{
+ return crypto_ahash_export(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_ahash(struct ahash_request *preq, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_tfm(req, tctx->hash);
+ return crypto_ahash_import(req, in);
+}
+
+static int hmac_export_core_ahash(struct ahash_request *preq, void *out)
+{
+ return crypto_ahash_export_core(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_core_ahash(struct ahash_request *preq, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_tfm(req, tctx->hash);
+ return crypto_ahash_import_core(req, in);
+}
+
+static int hmac_init_ahash(struct ahash_request *preq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ return hmac_import_ahash(preq, &tctx->pads[0]);
+}
+
+static int hmac_update_ahash(struct ahash_request *preq)
+{
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_callback(req, ahash_request_flags(preq),
+ preq->base.complete, preq->base.data);
+ if (ahash_request_isvirt(preq))
+ ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes);
+ else
+ ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes);
+ return crypto_ahash_update(req);
+}
+
+static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_request *req = ahash_request_ctx(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ int ds = crypto_ahash_digestsize(tfm);
+ int ss = crypto_ahash_statesize(tfm);
+ const u8 *opad = &tctx->pads[ss];
+
+ ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask,
+ preq->base.complete, preq->base.data);
+ ahash_request_set_virt(req, preq->result, preq->result, ds);
+ return crypto_ahash_import(req, opad) ?:
+ crypto_ahash_finup(req);
+
+}
+
+static void hmac_finup_done(void *data, int err)
+{
+ struct ahash_request *preq = data;
+
+ if (err)
+ goto out;
+
+ err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ ahash_request_complete(preq, err);
+}
+
+static int hmac_finup_ahash(struct ahash_request *preq)
+{
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_callback(req, ahash_request_flags(preq),
+ hmac_finup_done, preq);
+ if (ahash_request_isvirt(preq))
+ ahash_request_set_virt(req, preq->svirt, preq->result,
+ preq->nbytes);
+ else
+ ahash_request_set_crypt(req, preq->src, preq->result,
+ preq->nbytes);
+ return crypto_ahash_finup(req) ?:
+ hmac_finup_finish(preq, 0);
+}
+
+static int hmac_digest_ahash(struct ahash_request *preq)
+{
+ return hmac_init_ahash(preq) ?:
+ hmac_finup_ahash(preq);
+}
+
+static int hmac_init_ahash_tfm(struct crypto_ahash *parent)
+{
+ struct ahash_instance *inst = ahash_alg_instance(parent);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+ struct crypto_ahash *hash;
+
+ hash = crypto_spawn_ahash(ahash_instance_ctx(inst));
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(hash))
+ return -EINVAL;
+
+ tctx->hash = hash;
+ return 0;
+}
+
+static int hmac_clone_ahash_tfm(struct crypto_ahash *dst,
+ struct crypto_ahash *src)
+{
+ struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src);
+ struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst);
+ struct crypto_ahash *hash;
+
+ hash = crypto_clone_ahash(sctx->hash);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ dctx->hash = hash;
+ return 0;
+}
+
+static void hmac_exit_ahash_tfm(struct crypto_ahash *parent)
+{
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+
+ crypto_free_ahash(tctx->hash);
+}
+
+static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb,
+ u32 mask)
+{
+ struct crypto_ahash_spawn *spawn;
+ struct ahash_instance *inst;
+ struct crypto_alg *alg;
+ struct hash_alg_common *halg;
+ int ds, ss, err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = ahash_instance_ctx(inst);
+
+ mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+ halg = crypto_spawn_ahash_alg(spawn);
+ alg = &halg->base;
+
+ /* The underlying hash algorithm must not require a key */
+ err = -EINVAL;
+ if (crypto_hash_alg_needs_key(halg))
+ goto err_free_inst;
+
+ ds = halg->digestsize;
+ ss = halg->statesize;
+ if (ds > alg->cra_blocksize || ss < alg->cra_blocksize)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg);
+ if (err)
+ goto err_free_inst;
+
+ inst->alg.halg.base.cra_flags = alg->cra_flags &
+ CRYPTO_ALG_INHERITED_FLAGS;
+ inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT;
+ inst->alg.halg.base.cra_priority = alg->cra_priority + 100;
+ inst->alg.halg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) +
+ (ss * 2);
+ inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) +
+ alg->cra_reqsize;
+
+ inst->alg.halg.digestsize = ds;
+ inst->alg.halg.statesize = ss;
+ inst->alg.init = hmac_init_ahash;
+ inst->alg.update = hmac_update_ahash;
+ inst->alg.finup = hmac_finup_ahash;
+ inst->alg.digest = hmac_digest_ahash;
+ inst->alg.export = hmac_export_ahash;
+ inst->alg.import = hmac_import_ahash;
+ inst->alg.export_core = hmac_export_core_ahash;
+ inst->alg.import_core = hmac_import_core_ahash;
+ inst->alg.setkey = hmac_setkey_ahash;
+ inst->alg.init_tfm = hmac_init_ahash_tfm;
+ inst->alg.clone_tfm = hmac_clone_ahash_tfm;
+ inst->alg.exit_tfm = hmac_exit_ahash_tfm;
+
+ inst->free = ahash_free_singlespawn_instance;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+err_free_inst:
+ ahash_free_singlespawn_instance(inst);
+ }
+ return err;
+}
+
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ u32 mask;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ mask = crypto_algt_inherited_mask(algt);
+
+ if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) &
+ algt->mask & CRYPTO_ALG_TYPE_MASK))
+ return hmac_create_ahash(tmpl, tb, mask);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) &
+ algt->mask & CRYPTO_ALG_TYPE_MASK)
+ return -EINVAL;
+
+ return __hmac_create_shash(tmpl, tb, mask);
+}
+
+static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
+ if (err)
+ return err == -EINVAL ? -ENOENT : err;
+
+ return __hmac_create_shash(tmpl, tb, mask);
+}
+
+static struct crypto_template hmac_tmpls[] = {
+ {
+ .name = "hmac",
+ .create = hmac_create,
+ .module = THIS_MODULE,
+ },
+ {
+ .name = "hmac-shash",
+ .create = hmac_create_shash,
+ .module = THIS_MODULE,
+ },
};
static int __init hmac_module_init(void)
{
- return crypto_register_template(&hmac_tmpl);
+ return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
}
static void __exit hmac_module_exit(void)
{
- crypto_unregister_template(&hmac_tmpl);
+ crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
}
-subsys_initcall(hmac_module_init);
+module_init(hmac_module_init);
module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/internal.h b/crypto/internal.h
index 11567ea24fc3..b9afd68767c1 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -46,6 +46,7 @@ struct crypto_type {
unsigned int maskclear;
unsigned int maskset;
unsigned int tfmsize;
+ unsigned int algsize;
};
enum {
@@ -66,8 +67,7 @@ extern struct blocking_notifier_head crypto_chain;
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || \
- IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
static inline bool crypto_boot_test_finished(void)
{
return true;
@@ -86,7 +86,7 @@ static inline void set_crypto_boot_test_finished(void)
static_branch_enable(&__crypto_boot_test_finished);
}
#endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) ||
- * IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+ * !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
*/
#ifdef CONFIG_PROC_FS
@@ -128,7 +128,6 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend, int node);
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm);
-void crypto_destroy_alg(struct crypto_alg *alg);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
@@ -163,6 +162,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
return alg;
}
+void crypto_destroy_alg(struct crypto_alg *alg);
+
static inline void crypto_alg_put(struct crypto_alg *alg)
{
if (refcount_dec_and_test(&alg->cra_refcnt))
diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c
index c3f9938e1ad2..b7a6bf9da773 100644
--- a/crypto/kdf_sp800108.c
+++ b/crypto/kdf_sp800108.c
@@ -127,7 +127,7 @@ static int __init crypto_kdf108_init(void)
{
int ret;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return 0;
ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 7ad338ca2c18..024264ee9cd1 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -871,7 +871,7 @@ static void __exit khazad_mod_fini(void)
}
-subsys_initcall(khazad_mod_init);
+module_init(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/kpp.c b/crypto/kpp.c
index ecc63a1a948d..2e0cefe7a25f 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -80,6 +80,7 @@ static const struct crypto_type crypto_kpp_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
.tfmsize = offsetof(struct crypto_kpp, base),
+ .algsize = offsetof(struct kpp_alg, base),
};
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c
index d07769bf149e..a1de55994d92 100644
--- a/crypto/krb5enc.c
+++ b/crypto/krb5enc.c
@@ -496,7 +496,7 @@ static void __exit crypto_krb5enc_module_exit(void)
crypto_unregister_template(&crypto_krb5enc_tmpl);
}
-subsys_initcall(crypto_krb5enc_module_init);
+module_init(crypto_krb5enc_module_init);
module_exit(crypto_krb5enc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 391ae0f7641f..dd403b800513 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
- if (err == -ENOENT) {
+ if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
err = -ENAMETOOLONG;
if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -356,7 +356,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
- if (!strncmp(cipher_name, "ecb(", 4)) {
+ if (!memcmp(cipher_name, "ecb(", 4)) {
int len;
len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
@@ -420,7 +420,7 @@ static void __exit lrw_module_exit(void)
crypto_unregister_template(&lrw_tmpl);
}
-subsys_initcall(lrw_module_init);
+module_init(lrw_module_init);
module_exit(lrw_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index cdb4897c63e6..c2e2c38b5aa8 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -294,6 +294,7 @@ static const struct crypto_type crypto_lskcipher_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_LSKCIPHER,
.tfmsize = offsetof(struct crypto_lskcipher, base),
+ .algsize = offsetof(struct lskcipher_alg, co.base),
};
static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 82588607fb2e..7a984ae5ae52 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -12,10 +12,6 @@
#include <linux/lz4.h>
#include <crypto/internal/scompress.h>
-struct lz4_ctx {
- void *lz4_comp_mem;
-};
-
static void *lz4_alloc_ctx(void)
{
void *ctx;
@@ -93,7 +89,7 @@ static void __exit lz4_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lz4_mod_init);
+module_init(lz4_mod_init);
module_exit(lz4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 997e76c0183a..9c61d05b6214 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -10,10 +10,6 @@
#include <linux/vmalloc.h>
#include <linux/lz4.h>
-struct lz4hc_ctx {
- void *lz4hc_comp_mem;
-};
-
static void *lz4hc_alloc_ctx(void)
{
void *ctx;
@@ -91,7 +87,7 @@ static void __exit lz4hc_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lz4hc_mod_init);
+module_init(lz4hc_mod_init);
module_exit(lz4hc_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index b1350ae278b8..ba013f2d5090 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -9,10 +9,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-struct lzorle_ctx {
- void *lzorle_comp_mem;
-};
-
static void *lzorle_alloc_ctx(void)
{
void *ctx;
@@ -95,7 +91,7 @@ static void __exit lzorle_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lzorle_mod_init);
+module_init(lzorle_mod_init);
module_exit(lzorle_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index dfe5a07ca35f..7867e2c67c4e 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -9,10 +9,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-struct lzo_ctx {
- void *lzo_comp_mem;
-};
-
static void *lzo_alloc_ctx(void)
{
void *ctx;
@@ -95,7 +91,7 @@ static void __exit lzo_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lzo_mod_init);
+module_init(lzo_mod_init);
module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md4.c b/crypto/md4.c
index 2e7f2f319f95..55bf47e23c13 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -233,7 +233,7 @@ static void __exit md4_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(md4_mod_init);
+module_init(md4_mod_init);
module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md5.c b/crypto/md5.c
index 72c0c46fb5ee..32c0819f5118 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -17,11 +17,9 @@
*/
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
@@ -120,10 +118,11 @@ static void md5_transform(__u32 *hash, __u32 const *in)
hash[3] += d;
}
-static inline void md5_transform_helper(struct md5_state *ctx)
+static inline void md5_transform_helper(struct md5_state *ctx,
+ u32 block[MD5_BLOCK_WORDS])
{
- le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
- md5_transform(ctx->hash, ctx->block);
+ le32_to_cpu_array(block, MD5_BLOCK_WORDS);
+ md5_transform(ctx->hash, block);
}
static int md5_init(struct shash_desc *desc)
@@ -142,76 +141,53 @@ static int md5_init(struct shash_desc *desc)
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+ u32 block[MD5_BLOCK_WORDS];
mctx->byte_count += len;
-
- if (avail > len) {
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, len);
- return 0;
- }
-
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, avail);
-
- md5_transform_helper(mctx);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(mctx->block)) {
- memcpy(mctx->block, data, sizeof(mctx->block));
- md5_transform_helper(mctx);
- data += sizeof(mctx->block);
- len -= sizeof(mctx->block);
- }
-
- memcpy(mctx->block, data, len);
-
- return 0;
+ do {
+ memcpy(block, data, sizeof(block));
+ md5_transform_helper(mctx, block);
+ data += sizeof(block);
+ len -= sizeof(block);
+ } while (len >= sizeof(block));
+ memzero_explicit(block, sizeof(block));
+ mctx->byte_count -= len;
+ return len;
}
-static int md5_final(struct shash_desc *desc, u8 *out)
+static int md5_finup(struct shash_desc *desc, const u8 *data, unsigned int len,
+ u8 *out)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- const unsigned int offset = mctx->byte_count & 0x3f;
- char *p = (char *)mctx->block + offset;
- int padding = 56 - (offset + 1);
+ u32 block[MD5_BLOCK_WORDS];
+ unsigned int offset;
+ int padding;
+ char *p;
+
+ memcpy(block, data, len);
+
+ offset = len;
+ p = (char *)block + offset;
+ padding = 56 - (offset + 1);
*p++ = 0x80;
if (padding < 0) {
memset(p, 0x00, padding + sizeof (u64));
- md5_transform_helper(mctx);
- p = (char *)mctx->block;
+ md5_transform_helper(mctx, block);
+ p = (char *)block;
padding = 56;
}
memset(p, 0, padding);
- mctx->block[14] = mctx->byte_count << 3;
- mctx->block[15] = mctx->byte_count >> 29;
- le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
- sizeof(u64)) / sizeof(u32));
- md5_transform(mctx->hash, mctx->block);
+ mctx->byte_count += len;
+ block[14] = mctx->byte_count << 3;
+ block[15] = mctx->byte_count >> 29;
+ le32_to_cpu_array(block, (sizeof(block) - sizeof(u64)) / sizeof(u32));
+ md5_transform(mctx->hash, block);
+ memzero_explicit(block, sizeof(block));
cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
memcpy(out, mctx->hash, sizeof(mctx->hash));
- memset(mctx, 0, sizeof(*mctx));
-
- return 0;
-}
-
-static int md5_export(struct shash_desc *desc, void *out)
-{
- struct md5_state *ctx = shash_desc_ctx(desc);
-
- memcpy(out, ctx, sizeof(*ctx));
- return 0;
-}
-
-static int md5_import(struct shash_desc *desc, const void *in)
-{
- struct md5_state *ctx = shash_desc_ctx(desc);
- memcpy(ctx, in, sizeof(*ctx));
return 0;
}
@@ -219,14 +195,12 @@ static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_init,
.update = md5_update,
- .final = md5_final,
- .export = md5_export,
- .import = md5_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
+ .finup = md5_finup,
+ .descsize = MD5_STATE_SIZE,
.base = {
.cra_name = "md5",
.cra_driver_name = "md5-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -242,7 +216,7 @@ static void __exit md5_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(md5_mod_init);
+module_init(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 0d14e980d4d6..69ad35f524d7 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -167,7 +167,7 @@ static void __exit michael_mic_exit(void)
}
-subsys_initcall(michael_mic_init);
+module_init(michael_mic_init);
module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index a661d4f667cd..2b648615b5ec 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -245,7 +245,7 @@ static void __exit nhpoly1305_mod_exit(void)
crypto_unregister_shash(&nhpoly1305_alg);
}
-subsys_initcall(nhpoly1305_mod_init);
+module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 9d2e56d6744a..d092717ea4fc 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -186,7 +186,7 @@ static void __exit crypto_pcbc_module_exit(void)
crypto_unregister_template(&crypto_pcbc_tmpl);
}
-subsys_initcall(crypto_pcbc_module_init);
+module_init(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 7fc79e7dce44..c33d29a523e0 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -381,7 +381,7 @@ static void __exit pcrypt_exit(void)
kset_unregister(pcrypt_kset);
}
-subsys_initcall(pcrypt_init);
+module_init(pcrypt_init);
module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
deleted file mode 100644
index e6f29a98725a..000000000000
--- a/crypto/poly1305_generic.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Poly1305 authenticator algorithm, RFC7539
- *
- * Copyright (C) 2015 Martin Willi
- *
- * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/unaligned.h>
-
-static int crypto_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- poly1305_core_init(&dctx->h);
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
-{
- if (!dctx->sset) {
- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_core_setkey(&dctx->core_r, src);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->rset = 2;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return srclen;
-}
-
-static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int srclen)
-{
- unsigned int datalen;
-
- if (unlikely(!dctx->sset)) {
- datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
- src += srclen - datalen;
- srclen = datalen;
- }
-
- poly1305_core_blocks(&dctx->h, &dctx->core_r, src,
- srclen / POLY1305_BLOCK_SIZE, 1);
-}
-
-static int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int bytes;
-
- if (unlikely(dctx->buflen)) {
- bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- srclen -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE);
- dctx->buflen = 0;
- }
- }
-
- if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- poly1305_blocks(dctx, src, srclen);
- src += srclen - (srclen % POLY1305_BLOCK_SIZE);
- srclen %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(srclen)) {
- dctx->buflen = srclen;
- memcpy(dctx->buf, src, srclen);
- }
-
- return 0;
-}
-
-static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_generic(dctx, dst);
- return 0;
-}
-
-static struct shash_alg poly1305_alg = {
- .digestsize = POLY1305_DIGEST_SIZE,
- .init = crypto_poly1305_init,
- .update = crypto_poly1305_update,
- .final = crypto_poly1305_final,
- .descsize = sizeof(struct poly1305_desc_ctx),
- .base = {
- .cra_name = "poly1305",
- .cra_driver_name = "poly1305-generic",
- .cra_priority = 100,
- .cra_blocksize = POLY1305_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init poly1305_mod_init(void)
-{
- return crypto_register_shash(&poly1305_alg);
-}
-
-static void __exit poly1305_mod_exit(void)
-{
- crypto_unregister_shash(&poly1305_alg);
-}
-
-subsys_initcall(poly1305_mod_init);
-module_exit(poly1305_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("Poly1305 authenticator");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-generic");
diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c
index 4f98910bcdb5..db8adb56e4ca 100644
--- a/crypto/polyval-generic.c
+++ b/crypto/polyval-generic.c
@@ -44,15 +44,15 @@
*
*/
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
-#include <crypto/polyval.h>
#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/polyval.h>
+#include <crypto/utils.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
struct polyval_tfm_ctx {
struct gf128mul_4k *gf128;
@@ -63,7 +63,6 @@ struct polyval_desc_ctx {
u8 buffer[POLYVAL_BLOCK_SIZE];
be128 buffer128;
};
- u32 bytes;
};
static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
@@ -76,46 +75,6 @@ static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
put_unaligned(swab64(b), (u64 *)&dst[0]);
}
-/*
- * Performs multiplication in the POLYVAL field using the GHASH field as a
- * subroutine. This function is used as a fallback for hardware accelerated
- * implementations when simd registers are unavailable.
- *
- * Note: This function is not used for polyval-generic, instead we use the 4k
- * lookup table implementation for finite field multiplication.
- */
-void polyval_mul_non4k(u8 *op1, const u8 *op2)
-{
- be128 a, b;
-
- // Assume one argument is in Montgomery form and one is not.
- copy_and_reverse((u8 *)&a, op1);
- copy_and_reverse((u8 *)&b, op2);
- gf128mul_x_lle(&a, &a);
- gf128mul_lle(&a, &b);
- copy_and_reverse(op1, (u8 *)&a);
-}
-EXPORT_SYMBOL_GPL(polyval_mul_non4k);
-
-/*
- * Perform a POLYVAL update using non4k multiplication. This function is used
- * as a fallback for hardware accelerated implementations when simd registers
- * are unavailable.
- *
- * Note: This function is not used for polyval-generic, instead we use the 4k
- * lookup table implementation of finite field multiplication.
- */
-void polyval_update_non4k(const u8 *key, const u8 *in,
- size_t nblocks, u8 *accumulator)
-{
- while (nblocks--) {
- crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE);
- polyval_mul_non4k(accumulator, key);
- in += POLYVAL_BLOCK_SIZE;
- }
-}
-EXPORT_SYMBOL_GPL(polyval_update_non4k);
-
static int polyval_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
@@ -154,56 +113,53 @@ static int polyval_update(struct shash_desc *desc,
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
- u8 *pos;
u8 tmp[POLYVAL_BLOCK_SIZE];
- int n;
-
- if (dctx->bytes) {
- n = min(srclen, dctx->bytes);
- pos = dctx->buffer + dctx->bytes - 1;
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos-- ^= *src++;
- if (!dctx->bytes)
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- }
-
- while (srclen >= POLYVAL_BLOCK_SIZE) {
+ do {
copy_and_reverse(tmp, src);
crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE);
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
src += POLYVAL_BLOCK_SIZE;
srclen -= POLYVAL_BLOCK_SIZE;
- }
+ } while (srclen >= POLYVAL_BLOCK_SIZE);
+
+ return srclen;
+}
- if (srclen) {
- dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
- pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1;
- while (srclen--)
- *pos-- ^= *src++;
+static int polyval_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
+{
+ struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (len) {
+ u8 tmp[POLYVAL_BLOCK_SIZE] = {};
+
+ memcpy(tmp, src, len);
+ polyval_update(desc, tmp, POLYVAL_BLOCK_SIZE);
}
+ copy_and_reverse(dst, dctx->buffer);
+ return 0;
+}
+
+static int polyval_export(struct shash_desc *desc, void *out)
+{
+ struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
+ copy_and_reverse(out, dctx->buffer);
return 0;
}
-static int polyval_final(struct shash_desc *desc, u8 *dst)
+static int polyval_import(struct shash_desc *desc, const void *in)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
- const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
- if (dctx->bytes)
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- copy_and_reverse(dst, dctx->buffer);
+ copy_and_reverse(dctx->buffer, in);
return 0;
}
-static void polyval_exit_tfm(struct crypto_tfm *tfm)
+static void polyval_exit_tfm(struct crypto_shash *tfm)
{
- struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm);
gf128mul_free_4k(ctx->gf128);
}
@@ -212,17 +168,21 @@ static struct shash_alg polyval_alg = {
.digestsize = POLYVAL_DIGEST_SIZE,
.init = polyval_init,
.update = polyval_update,
- .final = polyval_final,
+ .finup = polyval_finup,
.setkey = polyval_setkey,
+ .export = polyval_export,
+ .import = polyval_import,
+ .exit_tfm = polyval_exit_tfm,
+ .statesize = sizeof(struct polyval_desc_ctx),
.descsize = sizeof(struct polyval_desc_ctx),
.base = {
.cra_name = "polyval",
.cra_driver_name = "polyval-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = POLYVAL_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct polyval_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_exit = polyval_exit_tfm,
},
};
@@ -236,7 +196,7 @@ static void __exit polyval_mod_exit(void)
crypto_unregister_shash(&polyval_alg);
}
-subsys_initcall(polyval_mod_init);
+module_init(polyval_mod_init);
module_exit(polyval_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index c5fe4034b153..9860b60c9be4 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -9,18 +9,14 @@
* Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
+#include <linux/string.h>
#include "ripemd.h"
struct rmd160_ctx {
u64 byte_count;
u32 state[5];
- __le32 buffer[16];
};
#define K1 RMD_K1
@@ -265,72 +261,59 @@ static int rmd160_init(struct shash_desc *desc)
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
- memset(rctx->buffer, 0, sizeof(rctx->buffer));
-
return 0;
}
static int rmd160_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ int remain = len - round_down(len, RMD160_BLOCK_SIZE);
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
-
- rctx->byte_count += len;
+ __le32 buffer[RMD160_BLOCK_SIZE / 4];
- /* Enough space in buffer? If so copy and we're done */
- if (avail > len) {
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, len);
- goto out;
- }
-
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, avail);
+ rctx->byte_count += len - remain;
- rmd160_transform(rctx->state, rctx->buffer);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(rctx->buffer)) {
- memcpy(rctx->buffer, data, sizeof(rctx->buffer));
- rmd160_transform(rctx->state, rctx->buffer);
- data += sizeof(rctx->buffer);
- len -= sizeof(rctx->buffer);
- }
+ do {
+ memcpy(buffer, data, sizeof(buffer));
+ rmd160_transform(rctx->state, buffer);
+ data += sizeof(buffer);
+ len -= sizeof(buffer);
+ } while (len >= sizeof(buffer));
- memcpy(rctx->buffer, data, len);
-
-out:
- return 0;
+ memzero_explicit(buffer, sizeof(buffer));
+ return remain;
}
/* Add padding and return the message digest. */
-static int rmd160_final(struct shash_desc *desc, u8 *out)
+static int rmd160_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
+ unsigned int bit_offset = RMD160_BLOCK_SIZE / 8 - 1;
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
- u32 i, index, padlen;
- __le64 bits;
+ union {
+ __le64 l64[RMD160_BLOCK_SIZE / 4];
+ __le32 l32[RMD160_BLOCK_SIZE / 2];
+ u8 u8[RMD160_BLOCK_SIZE * 2];
+ } block = {};
__le32 *dst = (__le32 *)out;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_le64(rctx->byte_count << 3);
-
- /* Pad out to 56 mod 64 */
- index = rctx->byte_count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- rmd160_update(desc, padding, padlen);
+ u32 i;
- /* Append length */
- rmd160_update(desc, (const u8 *)&bits, sizeof(bits));
+ rctx->byte_count += len;
+ if (len >= bit_offset * 8)
+ bit_offset += RMD160_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ block.l64[bit_offset] = cpu_to_le64(rctx->byte_count << 3);
+
+ rmd160_transform(rctx->state, block.l32);
+ if (bit_offset > RMD160_BLOCK_SIZE / 8)
+ rmd160_transform(rctx->state,
+ block.l32 + RMD160_BLOCK_SIZE / 4);
+ memzero_explicit(&block, sizeof(block));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
-
- /* Wipe context */
- memset(rctx, 0, sizeof(*rctx));
-
return 0;
}
@@ -338,11 +321,12 @@ static struct shash_alg alg = {
.digestsize = RMD160_DIGEST_SIZE,
.init = rmd160_init,
.update = rmd160_update,
- .final = rmd160_final,
+ .finup = rmd160_finup,
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
.cra_driver_name = "rmd160-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -358,7 +342,7 @@ static void __exit rmd160_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(rmd160_mod_init);
+module_init(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rng.c b/crypto/rng.c
index 9d8804e46422..b8ae6ebc091d 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -98,6 +98,7 @@ static const struct crypto_type crypto_rng_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
.tfmsize = offsetof(struct crypto_rng, base),
+ .algsize = offsetof(struct rng_alg, base),
};
struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
diff --git a/crypto/rsa.c b/crypto/rsa.c
index b7d21529c552..6c7734083c98 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -430,7 +430,7 @@ static void __exit rsa_exit(void)
crypto_unregister_akcipher(&rsa);
}
-subsys_initcall(rsa_init);
+module_init(rsa_init);
module_exit(rsa_exit);
MODULE_ALIAS_CRYPTO("rsa");
MODULE_LICENSE("GPL");
diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c
index d01ac75635e0..94fa5e9600e7 100644
--- a/crypto/rsassa-pkcs1.c
+++ b/crypto/rsassa-pkcs1.c
@@ -301,7 +301,7 @@ static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm)
{
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
- return ctx->key_size;
+ return ctx->key_size * BITS_PER_BYTE;
}
static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm,
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 8225801488d5..1d010e2a1b1a 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -10,10 +10,25 @@
*/
#include <crypto/scatterwalk.h>
+#include <linux/crypto.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+enum {
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
+};
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+ return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes)
{
@@ -89,27 +104,23 @@ EXPORT_SYMBOL_GPL(memcpy_to_sglist);
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct scatter_walk swalk;
- struct scatter_walk dwalk;
+ struct skcipher_walk walk = {};
if (unlikely(nbytes == 0)) /* in case sg == NULL */
return;
- scatterwalk_start(&swalk, src);
- scatterwalk_start(&dwalk, dst);
+ walk.total = nbytes;
+
+ scatterwalk_start(&walk.in, src);
+ scatterwalk_start(&walk.out, dst);
+ skcipher_walk_first(&walk, true);
do {
- unsigned int slen, dlen;
- unsigned int len;
-
- slen = scatterwalk_next(&swalk, nbytes);
- dlen = scatterwalk_next(&dwalk, nbytes);
- len = min(slen, dlen);
- memcpy(dwalk.addr, swalk.addr, len);
- scatterwalk_done_dst(&dwalk, len);
- scatterwalk_done_src(&swalk, len);
- nbytes -= len;
- } while (nbytes);
+ if (walk.src.virt.addr != walk.dst.virt.addr)
+ memcpy(walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes);
+ skcipher_walk_done(&walk, 0);
+ } while (walk.nbytes);
}
EXPORT_SYMBOL_GPL(memcpy_sglist);
@@ -135,3 +146,236 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
return dst;
}
EXPORT_SYMBOL_GPL(scatterwalk_ffwd);
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned n;
+ void *buffer;
+
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
+ }
+
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
+
+ walk->nbytes = bsize;
+ walk->flags |= SKCIPHER_WALK_SLOW;
+
+ return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+ void *tmp = walk->page;
+
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
+
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
+ return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+ unsigned long diff;
+
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
+
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
+
+ if (diff) {
+ walk->flags |= SKCIPHER_WALK_DIFF;
+ scatterwalk_map(&walk->in);
+ }
+
+ return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+ unsigned int bsize;
+ unsigned int n;
+
+ n = walk->total;
+ bsize = min(walk->stride, max(n, walk->blocksize));
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+ if (unlikely(walk->total < walk->blocksize))
+ return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+ return skcipher_next_slow(walk, bsize);
+ }
+ walk->nbytes = n;
+
+ if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+ if (!walk->page) {
+ gfp_t gfp = skcipher_walk_gfp(walk);
+
+ walk->page = (void *)__get_free_page(gfp);
+ if (!walk->page)
+ goto slow_path;
+ }
+ walk->flags |= SKCIPHER_WALK_COPY;
+ return skcipher_next_copy(walk);
+ }
+
+ return skcipher_next_fast(walk);
+}
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned ivsize = walk->ivsize;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
+ unsigned size;
+ u8 *iv;
+
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+ if (!walk->buffer)
+ return -ENOMEM;
+
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
+
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+}
+
+int skcipher_walk_first(struct skcipher_walk *walk, bool atomic)
+{
+ if (WARN_ON_ONCE(in_hardirq()))
+ return -EDEADLK;
+
+ walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP;
+
+ walk->buffer = NULL;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = skcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+
+ walk->page = NULL;
+
+ return skcipher_walk_next(walk);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_first);
+
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
+{
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
+
+ if (!n)
+ goto finish;
+
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
+ }
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
+ scatterwalk_advance(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+ scatterwalk_done_src(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_COPY) {
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+ * the message wasn't evenly divisible into blocks but
+ * the algorithm requires it.
+ */
+ res = -EINVAL;
+ total = 0;
+ } else
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
+ }
+
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
+
+ if (res > 0)
+ res = 0;
+
+ walk->total = total;
+ walk->nbytes = 0;
+
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
+ return skcipher_walk_next(walk);
+ }
+
+finish:
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
+
+ if (walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
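For reference, a minimal sketch (not part of this diff) of the caller loop that the skcipher_walk_done() kernel-doc above describes; mycipher_ecb_crypt and mycipher_crypt_block are hypothetical stand-ins for a real block-cipher driver.

	#include <crypto/internal/skcipher.h>
	#include <linux/string.h>

	/* Hypothetical per-block helper; a real driver would do cipher work here. */
	static void mycipher_crypt_block(struct crypto_skcipher *tfm, u8 *dst,
					 const u8 *src, unsigned int bsize)
	{
		memcpy(dst, src, bsize);	/* placeholder transform */
	}

	static int mycipher_ecb_crypt(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		unsigned int bsize = crypto_skcipher_blocksize(tfm);
		struct skcipher_walk walk;
		unsigned int nbytes;
		int err;

		err = skcipher_walk_virt(&walk, req, false);
		while ((nbytes = walk.nbytes) != 0) {
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			unsigned int i, n = nbytes - (nbytes % bsize);

			for (i = 0; i < n; i += bsize)
				mycipher_crypt_block(tfm, dst + i, src + i, bsize);

			/* report the trailing bytes not consumed in this step */
			err = skcipher_walk_done(&walk, nbytes - n);
		}
		return err;
	}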
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 5762fcc63b51..c651e7f2197a 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -7,9 +7,9 @@
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
+#include <linux/cpumask.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
@@ -20,20 +20,17 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
-#define SCOMP_SCRATCH_SIZE 65400
-
struct scomp_scratch {
spinlock_t lock;
union {
void *src;
unsigned long saddr;
};
- void *dst;
};
static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
@@ -44,6 +41,10 @@ static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
+static cpumask_t scomp_scratch_want;
+static void scomp_scratch_workfn(struct work_struct *work);
+static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);
+
static int __maybe_unused crypto_scomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -74,82 +75,48 @@ static void crypto_scomp_free_scratches(void)
scratch = per_cpu_ptr(&scomp_scratch, i);
free_page(scratch->saddr);
- vfree(scratch->dst);
scratch->src = NULL;
- scratch->dst = NULL;
}
}
-static int crypto_scomp_alloc_scratches(void)
+static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
- struct scomp_scratch *scratch;
- int i;
-
- for_each_possible_cpu(i) {
- struct page *page;
- void *mem;
-
- scratch = per_cpu_ptr(&scomp_scratch, i);
+ int node = cpu_to_node(cpu);
+ struct page *page;
- page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
- if (!page)
- goto error;
- scratch->src = page_address(page);
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
- goto error;
- scratch->dst = mem;
- }
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+ spin_lock_bh(&scratch->lock);
+ scratch->src = page_address(page);
+ spin_unlock_bh(&scratch->lock);
return 0;
-error:
- crypto_scomp_free_scratches();
- return -ENOMEM;
}
-static void scomp_free_streams(struct scomp_alg *alg)
+static void scomp_scratch_workfn(struct work_struct *work)
{
- struct crypto_acomp_stream __percpu *stream = alg->stream;
- int i;
+ int cpu;
- alg->stream = NULL;
- if (!stream)
- return;
+ for_each_cpu(cpu, &scomp_scratch_want) {
+ struct scomp_scratch *scratch;
- for_each_possible_cpu(i) {
- struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
-
- if (IS_ERR_OR_NULL(ps->ctx))
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ if (scratch->src)
+ continue;
+ if (scomp_alloc_scratch(scratch, cpu))
break;
- alg->free_ctx(ps->ctx);
+ cpumask_clear_cpu(cpu, &scomp_scratch_want);
}
-
- free_percpu(stream);
}
-static int scomp_alloc_streams(struct scomp_alg *alg)
+static int crypto_scomp_alloc_scratches(void)
{
- struct crypto_acomp_stream __percpu *stream;
- int i;
-
- stream = alloc_percpu(struct crypto_acomp_stream);
- if (!stream)
- return -ENOMEM;
-
- alg->stream = stream;
-
- for_each_possible_cpu(i) {
- struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
-
- ps->ctx = alg->alloc_ctx();
- if (IS_ERR(ps->ctx)) {
- scomp_free_streams(alg);
- return PTR_ERR(ps->ctx);
- }
+ unsigned int i = cpumask_first(cpu_possible_mask);
+ struct scomp_scratch *scratch;
- spin_lock_init(&ps->lock);
- }
- return 0;
+ scratch = per_cpu_ptr(&scomp_scratch, i);
+ return scomp_alloc_scratch(scratch, i);
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
@@ -158,16 +125,13 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
int ret = 0;
mutex_lock(&scomp_lock);
- if (!alg->stream) {
- ret = scomp_alloc_streams(alg);
- if (ret)
- goto unlock;
- }
- if (!scomp_scratch_users) {
+ ret = crypto_acomp_alloc_streams(&alg->streams);
+ if (ret)
+ goto unlock;
+ if (!scomp_scratch_users++) {
ret = crypto_scomp_alloc_scratches();
if (ret)
- goto unlock;
- scomp_scratch_users++;
+ scomp_scratch_users--;
}
unlock:
mutex_unlock(&scomp_lock);
@@ -175,13 +139,40 @@ unlock:
return ret;
}
+static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
+{
+ int cpu = raw_smp_processor_id();
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ spin_lock(&scratch->lock);
+ if (likely(scratch->src))
+ return scratch;
+ spin_unlock(&scratch->lock);
+
+ cpumask_set_cpu(cpu, &scomp_scratch_want);
+ schedule_work(&scomp_scratch_work);
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
+ spin_lock(&scratch->lock);
+ return scratch;
+}
+
+static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
+ __releases(scratch)
+{
+ spin_unlock(&scratch->lock);
+}
+
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
- struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
+ bool src_isvirt = acomp_request_src_isvirt(req);
+ bool dst_isvirt = acomp_request_dst_isvirt(req);
struct crypto_scomp *scomp = *tfm_ctx;
struct crypto_acomp_stream *stream;
+ struct scomp_scratch *scratch;
unsigned int slen = req->slen;
unsigned int dlen = req->dlen;
struct page *spage, *dpage;
@@ -198,15 +189,32 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
if (!req->dst || !dlen)
return -EINVAL;
- if (acomp_request_src_isvirt(req))
+ if (dst_isvirt)
+ dst = req->dvirt;
+ else {
+ if (dlen <= req->dst->length) {
+ dpage = sg_page(req->dst);
+ doff = req->dst->offset;
+ } else
+ return -ENOSYS;
+
+ dpage = nth_page(dpage, doff / PAGE_SIZE);
+ doff = offset_in_page(doff);
+
+ n = (dlen - 1) / PAGE_SIZE;
+ n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
+ if (PageHighMem(dpage + n) &&
+ size_add(doff, dlen) > PAGE_SIZE)
+ return -ENOSYS;
+ dst = kmap_local_page(dpage) + doff;
+ }
+
+ if (src_isvirt)
src = req->svirt;
else {
- src = scratch->src;
+ src = NULL;
do {
- if (acomp_request_src_isfolio(req)) {
- spage = folio_page(req->sfolio, 0);
- soff = req->soff;
- } else if (slen <= req->src->length) {
+ if (slen <= req->src->length) {
spage = sg_page(req->src);
soff = req->src->offset;
} else
@@ -215,8 +223,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
spage = nth_page(spage, soff / PAGE_SIZE);
soff = offset_in_page(soff);
- n = slen / PAGE_SIZE;
- n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
+ n = (slen - 1) / PAGE_SIZE;
+ n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
if (PageHighMem(nth_page(spage, n)) &&
size_add(soff, slen) > PAGE_SIZE)
break;
@@ -224,59 +232,37 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
} while (0);
}
- if (acomp_request_dst_isvirt(req))
- dst = req->dvirt;
- else {
- unsigned int max = SCOMP_SCRATCH_SIZE;
-
- dst = scratch->dst;
- do {
- if (acomp_request_dst_isfolio(req)) {
- dpage = folio_page(req->dfolio, 0);
- doff = req->doff;
- } else if (dlen <= req->dst->length) {
- dpage = sg_page(req->dst);
- doff = req->dst->offset;
- } else
- break;
-
- dpage = nth_page(dpage, doff / PAGE_SIZE);
- doff = offset_in_page(doff);
+ stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);
- n = dlen / PAGE_SIZE;
- n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
- if (PageHighMem(dpage + n) &&
- size_add(doff, dlen) > PAGE_SIZE)
- break;
- dst = kmap_local_page(dpage) + doff;
- max = dlen;
- } while (0);
- dlen = min(dlen, max);
- }
-
- spin_lock_bh(&scratch->lock);
+ if (!src_isvirt && !src) {
+ const u8 *src;
- if (src == scratch->src)
+ scratch = scomp_lock_scratch();
+ src = scratch->src;
memcpy_from_sglist(scratch->src, req->src, 0, slen);
- stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
- spin_lock(&stream->lock);
- if (dir)
+ if (dir)
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+ else
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+
+ scomp_unlock_scratch(scratch);
+ } else if (dir)
ret = crypto_scomp_compress(scomp, src, slen,
dst, &dlen, stream->ctx);
else
ret = crypto_scomp_decompress(scomp, src, slen,
dst, &dlen, stream->ctx);
- if (dst == scratch->dst)
- memcpy_to_sglist(req->dst, 0, dst, dlen);
-
- spin_unlock(&stream->lock);
- spin_unlock_bh(&scratch->lock);
+ crypto_acomp_unlock_stream_bh(stream);
req->dlen = dlen;
- if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) {
+ if (!src_isvirt && src)
+ kunmap_local(src);
+ if (!dst_isvirt) {
kunmap_local(dst);
dlen += doff;
for (;;) {
@@ -287,34 +273,18 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
dpage = nth_page(dpage, 1);
}
}
- if (!acomp_request_src_isvirt(req) && src != scratch->src)
- kunmap_local(src);
return ret;
}
-static int scomp_acomp_chain(struct acomp_req *req, int dir)
-{
- struct acomp_req *r2;
- int err;
-
- err = scomp_acomp_comp_decomp(req, dir);
- req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list)
- r2->base.err = scomp_acomp_comp_decomp(r2, dir);
-
- return err;
-}
-
static int scomp_acomp_compress(struct acomp_req *req)
{
- return scomp_acomp_chain(req, 1);
+ return scomp_acomp_comp_decomp(req, 1);
}
static int scomp_acomp_decompress(struct acomp_req *req)
{
- return scomp_acomp_chain(req, 0);
+ return scomp_acomp_comp_decomp(req, 0);
}
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
@@ -323,6 +293,7 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
crypto_free_scomp(*ctx);
+ flush_work(&scomp_scratch_work);
mutex_lock(&scomp_lock);
if (!--scomp_scratch_users)
crypto_scomp_free_scratches();
@@ -356,7 +327,9 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
static void crypto_scomp_destroy(struct crypto_alg *alg)
{
- scomp_free_streams(__crypto_scomp_alg(alg));
+ struct scomp_alg *scomp = __crypto_scomp_alg(alg);
+
+ crypto_acomp_free_streams(&scomp->streams);
}
static const struct crypto_type crypto_scomp_type = {
@@ -373,6 +346,7 @@ static const struct crypto_type crypto_scomp_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
.tfmsize = offsetof(struct crypto_scomp, base),
+ .algsize = offsetof(struct scomp_alg, base),
};
static void scomp_prepare_alg(struct scomp_alg *alg)
@@ -381,7 +355,7 @@ static void scomp_prepare_alg(struct scomp_alg *alg)
comp_prepare_alg(&alg->calg);
- base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
+ base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
}
int crypto_register_scomp(struct scomp_alg *alg)
diff --git a/crypto/seed.c b/crypto/seed.c
index d05d8ed909fa..815391f213de 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -460,7 +460,7 @@ static void __exit seed_fini(void)
crypto_unregister_alg(&seed_alg);
}
-subsys_initcall(seed_init);
+module_init(seed_init);
module_exit(seed_fini);
MODULE_DESCRIPTION("SEED Cipher Algorithm");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 17e11d51ddc3..2bae99e33526 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -64,20 +64,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
data = req->base.data;
info = req->iv;
- if (req->src != req->dst) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
-
- skcipher_request_set_sync_tfm(nreq, ctx->sknull);
- skcipher_request_set_callback(nreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst,
- req->assoclen + req->cryptlen,
- NULL);
-
- err = crypto_skcipher_encrypt(nreq);
- if (err)
- return err;
- }
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src,
+ req->assoclen + req->cryptlen);
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
@@ -179,7 +168,7 @@ static void __exit seqiv_module_exit(void)
crypto_unregister_template(&seqiv_tmpl);
}
-subsys_initcall(seqiv_module_init);
+module_init(seqiv_module_init);
module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index f6ef187be6fe..b21e7606c652 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -599,7 +599,7 @@ static void __exit serpent_mod_fini(void)
crypto_unregister_alg(&srp_alg);
}
-subsys_initcall(serpent_mod_init);
+module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 325b57fe28dc..024e8043bab0 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -12,13 +12,11 @@
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
@@ -39,38 +37,31 @@ static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
memzero_explicit(temp, sizeof(temp));
}
-int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- return sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
+ return sha1_base_do_update_blocks(desc, data, len,
+ sha1_generic_block_fn);
}
-EXPORT_SYMBOL(crypto_sha1_update);
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- sha1_base_do_finalize(desc, sha1_generic_block_fn);
+ sha1_base_do_finup(desc, data, len, sha1_generic_block_fn);
return sha1_base_finish(desc, out);
}
-int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
- return sha1_final(desc, out);
-}
-EXPORT_SYMBOL(crypto_sha1_finup);
-
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = crypto_sha1_update,
- .final = sha1_final,
.finup = crypto_sha1_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -86,7 +77,7 @@ static void __exit sha1_generic_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(sha1_generic_mod_init);
+module_init(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha256.c b/crypto/sha256.c
new file mode 100644
index 000000000000..4aeb213bab11
--- /dev/null
+++ b/crypto/sha256.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API wrapper for the SHA-256 and SHA-224 library functions
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
+static int crypto_sha256_init(struct shash_desc *desc)
+{
+ sha256_block_init(shash_desc_ctx(desc));
+ return 0;
+}
+
+static inline int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, bool force_generic)
+{
+ struct crypto_sha256_state *sctx = shash_desc_ctx(desc);
+ int remain = len % SHA256_BLOCK_SIZE;
+
+ sctx->count += len - remain;
+ sha256_choose_blocks(sctx->state, data, len / SHA256_BLOCK_SIZE,
+ force_generic, !force_generic);
+ return remain;
+}
+
+static int crypto_sha256_update_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_sha256_update(desc, data, len, true);
+}
+
+static int crypto_sha256_update_lib(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ sha256_update(shash_desc_ctx(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha256_update_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_sha256_update(desc, data, len, false);
+}
+
+static int crypto_sha256_final_lib(struct shash_desc *desc, u8 *out)
+{
+ sha256_final(shash_desc_ctx(desc), out);
+ return 0;
+}
+
+static __always_inline int crypto_sha256_finup(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len, u8 *out,
+ bool force_generic)
+{
+ struct crypto_sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int remain = len;
+ u8 *buf;
+
+ if (len >= SHA256_BLOCK_SIZE)
+ remain = crypto_sha256_update(desc, data, len, force_generic);
+ sctx->count += remain;
+ buf = memcpy(sctx + 1, data + len - remain, remain);
+ sha256_finup(sctx, buf, remain, out,
+ crypto_shash_digestsize(desc->tfm), force_generic,
+ !force_generic);
+ return 0;
+}
+
+static int crypto_sha256_finup_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return crypto_sha256_finup(desc, data, len, out, true);
+}
+
+static int crypto_sha256_finup_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return crypto_sha256_finup(desc, data, len, out, false);
+}
+
+static int crypto_sha256_digest_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ crypto_sha256_init(desc);
+ return crypto_sha256_finup_generic(desc, data, len, out);
+}
+
+static int crypto_sha256_digest_lib(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ sha256(data, len, out);
+ return 0;
+}
+
+static int crypto_sha256_digest_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ crypto_sha256_init(desc);
+ return crypto_sha256_finup_arch(desc, data, len, out);
+}
+
+static int crypto_sha224_init(struct shash_desc *desc)
+{
+ sha224_block_init(shash_desc_ctx(desc));
+ return 0;
+}
+
+static int crypto_sha224_final_lib(struct shash_desc *desc, u8 *out)
+{
+ sha224_final(shash_desc_ctx(desc), out);
+ return 0;
+}
+
+static int crypto_sha256_import_lib(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ const u8 *p = in;
+
+ memcpy(sctx, p, sizeof(*sctx));
+ p += sizeof(*sctx);
+ sctx->count += *p;
+ return 0;
+}
+
+static int crypto_sha256_export_lib(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx0 = shash_desc_ctx(desc);
+ struct sha256_state sctx = *sctx0;
+ unsigned int partial;
+ u8 *p = out;
+
+ partial = sctx.count % SHA256_BLOCK_SIZE;
+ sctx.count -= partial;
+ memcpy(p, &sctx, sizeof(sctx));
+ p += sizeof(sctx);
+ *p = partial;
+ return 0;
+}
+
+static struct shash_alg algs[] = {
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_generic,
+ .finup = crypto_sha256_finup_generic,
+ .digest = crypto_sha256_digest_generic,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_generic,
+ .finup = crypto_sha256_finup_generic,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-lib",
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_lib,
+ .final = crypto_sha256_final_lib,
+ .digest = crypto_sha256_digest_lib,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct crypto_sha256_state) +
+ SHA256_BLOCK_SIZE + 1,
+ .import = crypto_sha256_import_lib,
+ .export = crypto_sha256_export_lib,
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-lib",
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_lib,
+ .final = crypto_sha224_final_lib,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct crypto_sha256_state) +
+ SHA256_BLOCK_SIZE + 1,
+ .import = crypto_sha256_import_lib,
+ .export = crypto_sha256_export_lib,
+ },
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_arch,
+ .finup = crypto_sha256_finup_arch,
+ .digest = crypto_sha256_digest_arch,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_arch,
+ .finup = crypto_sha256_finup_arch,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+};
+
+static unsigned int num_algs;
+
+static int __init crypto_sha256_mod_init(void)
+{
+ /* register the arch flavours only if they differ from generic */
+ num_algs = ARRAY_SIZE(algs);
+ BUILD_BUG_ON(ARRAY_SIZE(algs) <= 2);
+ if (!sha256_is_arch_optimized())
+ num_algs -= 2;
+ return crypto_register_shashes(algs, num_algs);
+}
+module_init(crypto_sha256_mod_init);
+
+static void __exit crypto_sha256_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, num_algs);
+}
+module_exit(crypto_sha256_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API wrapper for the SHA-256 and SHA-224 library functions");
+
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-generic");
+MODULE_ALIAS_CRYPTO("sha256-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-generic");
+MODULE_ALIAS_CRYPTO("sha224-" __stringify(ARCH));
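For orientation, here is a minimal usage sketch of how kernel code would drive the algorithms registered above through the shash API; it is illustration only, not part of the patch, and the helper name example_sha256() is made up for this sketch.

#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/err.h>

/* Hypothetical helper: one-shot SHA-256 of a buffer via the shash API. */
static int example_sha256(const u8 *data, unsigned int len,
			  u8 digest[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_tfm_digest(tfm, data, len, digest);
	crypto_free_shash(tfm);
	return err;
}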
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
deleted file mode 100644
index b00521f1a6d4..000000000000
--- a/crypto/sha256_generic.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c
- *
- * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
-
-const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
- 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
- 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
- 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
- 0x2f
-};
-EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
-
-const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
- 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
- 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
- 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
- 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
-};
-EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
-
-int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- sha256_update(shash_desc_ctx(desc), data, len);
- return 0;
-}
-EXPORT_SYMBOL(crypto_sha256_update);
-
-static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
-{
- if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE)
- sha224_final(shash_desc_ctx(desc), out);
- else
- sha256_final(shash_desc_ctx(desc), out);
- return 0;
-}
-
-int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash)
-{
- sha256_update(shash_desc_ctx(desc), data, len);
- return crypto_sha256_final(desc, hash);
-}
-EXPORT_SYMBOL(crypto_sha256_finup);
-
-static struct shash_alg sha256_algs[2] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_update,
- .final = crypto_sha256_final,
- .finup = crypto_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_update,
- .final = crypto_sha256_final,
- .finup = crypto_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha256_generic_mod_init(void)
-{
- return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
-}
-
-static void __exit sha256_generic_mod_fini(void)
-{
- crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
-}
-
-subsys_initcall(sha256_generic_mod_init);
-module_exit(sha256_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
-
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha224-generic");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha256-generic");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index b103642b56ea..41d1e506e6de 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -9,10 +9,10 @@
* Ard Biesheuvel <ard.biesheuvel@linaro.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
#include <crypto/sha3.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <linux/unaligned.h>
/*
@@ -161,68 +161,51 @@ static void keccakf(u64 st[25])
int crypto_sha3_init(struct shash_desc *desc)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
-
- sctx->rsiz = 200 - 2 * digest_size;
- sctx->rsizw = sctx->rsiz / 8;
- sctx->partial = 0;
memset(sctx->st, 0, sizeof(sctx->st));
return 0;
}
EXPORT_SYMBOL(crypto_sha3_init);
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
+ unsigned int rsiz = crypto_shash_blocksize(desc->tfm);
struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int done;
- const u8 *src;
-
- done = 0;
- src = data;
-
- if ((sctx->partial + len) > (sctx->rsiz - 1)) {
- if (sctx->partial) {
- done = -sctx->partial;
- memcpy(sctx->buf + sctx->partial, data,
- done + sctx->rsiz);
- src = sctx->buf;
- }
+ unsigned int rsizw = rsiz / 8;
- do {
- unsigned int i;
+ do {
+ int i;
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
- keccakf(sctx->st);
+ for (i = 0; i < rsizw; i++)
+ sctx->st[i] ^= get_unaligned_le64(data + 8 * i);
+ keccakf(sctx->st);
- done += sctx->rsiz;
- src = data + done;
- } while (done + (sctx->rsiz - 1) < len);
-
- sctx->partial = 0;
- }
- memcpy(sctx->buf + sctx->partial, src, len - done);
- sctx->partial += (len - done);
-
- return 0;
+ data += rsiz;
+ len -= rsiz;
+ } while (len >= rsiz);
+ return len;
}
-EXPORT_SYMBOL(crypto_sha3_update);
-int crypto_sha3_final(struct shash_desc *desc, u8 *out)
+static int crypto_sha3_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int i, inlen = sctx->partial;
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ unsigned int rsiz = crypto_shash_blocksize(desc->tfm);
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ __le64 block[SHA3_224_BLOCK_SIZE / 8] = {};
__le64 *digest = (__le64 *)out;
+ unsigned int rsizw = rsiz / 8;
+ u8 *p;
+ int i;
- sctx->buf[inlen++] = 0x06;
- memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
- sctx->buf[sctx->rsiz - 1] |= 0x80;
+ p = memcpy(block, src, len);
+ p[len++] = 0x06;
+ p[rsiz - 1] |= 0x80;
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
+ for (i = 0; i < rsizw; i++)
+ sctx->st[i] ^= le64_to_cpu(block[i]);
+ memzero_explicit(block, sizeof(block));
keccakf(sctx->st);
@@ -232,49 +215,51 @@ int crypto_sha3_final(struct shash_desc *desc, u8 *out)
if (digest_size & 4)
put_unaligned_le32(sctx->st[i], (__le32 *)digest);
- memset(sctx, 0, sizeof(*sctx));
return 0;
}
-EXPORT_SYMBOL(crypto_sha3_final);
static struct shash_alg algs[] = { {
.digestsize = SHA3_224_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_256_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_384_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_512_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
@@ -289,7 +274,7 @@ static void __exit sha3_generic_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-subsys_initcall(sha3_generic_mod_init);
+module_init(sha3_generic_mod_init);
module_exit(sha3_generic_mod_fini);
MODULE_LICENSE("GPL");
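The new finup path above packs the SHA-3 domain-separation suffix and the final padding bit into one stack block before absorbing it. A small sketch of just that step, assuming len < rsiz; the function name sha3_absorb_last() is invented for illustration and is not part of the patch.

#include <linux/string.h>
#include <linux/unaligned.h>

/*
 * Illustration only: absorb the final partial block the way
 * crypto_sha3_finup() does.  rsiz is the rate in bytes (cra_blocksize).
 */
static void sha3_absorb_last(u64 *st, const u8 *src, unsigned int len,
			     unsigned int rsiz)
{
	u8 block[200] = { };		/* large enough for any SHA-3 rate */
	unsigned int i;

	memcpy(block, src, len);
	block[len] = 0x06;		/* SHA-3 domain-separation suffix */
	block[rsiz - 1] |= 0x80;	/* final bit of the pad10*1 padding */

	for (i = 0; i < rsiz / 8; i++)	/* XOR the padded block into the state */
		st[i] ^= get_unaligned_le64(block + 8 * i);
	/* ...followed by one keccakf() permutation of st, as in the patch */
}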
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index ed81813bd420..7368173f545e 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -6,16 +6,10 @@
* Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <linux/percpu.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/unaligned.h>
const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
@@ -145,47 +139,42 @@ sha512_transform(u64 *state, const u8 *input)
state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}
-static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
- int blocks)
+void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
+ int blocks)
{
- while (blocks--) {
+ do {
sha512_transform(sst->state, src);
src += SHA512_BLOCK_SIZE;
- }
+ } while (--blocks);
}
+EXPORT_SYMBOL_GPL(sha512_generic_block_fn);
-int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- return sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
+ return sha512_base_do_update_blocks(desc, data, len,
+ sha512_generic_block_fn);
}
-EXPORT_SYMBOL(crypto_sha512_update);
-static int sha512_final(struct shash_desc *desc, u8 *hash)
+static int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash)
{
- sha512_base_do_finalize(desc, sha512_generic_block_fn);
+ sha512_base_do_finup(desc, data, len, sha512_generic_block_fn);
return sha512_base_finish(desc, hash);
}
-int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash)
-{
- sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
- return sha512_final(desc, hash);
-}
-EXPORT_SYMBOL(crypto_sha512_finup);
-
static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = crypto_sha512_update,
- .final = sha512_final,
.finup = crypto_sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -193,13 +182,14 @@ static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = crypto_sha512_update,
- .final = sha512_final,
.finup = crypto_sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -215,7 +205,7 @@ static void __exit sha512_generic_mod_fini(void)
crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
-subsys_initcall(sha512_generic_mod_init);
+module_init(sha512_generic_mod_init);
module_exit(sha512_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/shash.c b/crypto/shash.c
index 301ab42bf849..37537d7995c7 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -16,6 +16,24 @@
#include "hash.h"
+static inline bool crypto_shash_block_only(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_shash_final_nonzero(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_shash_finup_max(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_FINUP_MAX;
+}
+
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -46,18 +64,27 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int __crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->update(desc, data, len);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_block_only(tfm)) {
+ u8 *buf = shash_desc_ctx(desc);
+
+ buf += crypto_shash_descsize(tfm) - 1;
+ *buf = 0;
+ }
+
+ return crypto_shash_alg(tfm)->init(desc);
}
-EXPORT_SYMBOL_GPL(crypto_shash_update);
-int crypto_shash_final(struct shash_desc *desc, u8 *out)
+int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->final(desc, out);
+ if (crypto_shash_get_flags(desc->tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return __crypto_shash_init(desc);
}
-EXPORT_SYMBOL_GPL(crypto_shash_final);
+EXPORT_SYMBOL_GPL(crypto_shash_init);
static int shash_default_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
@@ -68,20 +95,89 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data,
shash->final(desc, out);
}
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int crypto_shash_op_and_zero(
+ int (*op)(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out),
+ struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
{
- return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out);
+ int err;
+
+ err = op(desc, data, len, out);
+ memset(shash_desc_ctx(desc), 0, crypto_shash_descsize(desc->tfm));
+ return err;
+}
+
+int crypto_shash_finup(struct shash_desc *restrict desc, const u8 *data,
+ unsigned int len, u8 *restrict out)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ u8 *blenp = shash_desc_ctx(desc);
+ bool finup_max, nonzero;
+ unsigned int bs;
+ int err;
+ u8 *buf;
+
+ if (!crypto_shash_block_only(tfm)) {
+ if (out)
+ goto finup;
+ return crypto_shash_alg(tfm)->update(desc, data, len);
+ }
+
+ finup_max = out && crypto_shash_finup_max(tfm);
+
+ /* Retain extra block for final nonzero algorithms. */
+ nonzero = crypto_shash_final_nonzero(tfm);
+
+ /*
+ * The partial block buffer follows the algorithm desc context.
+ * The byte following that contains the length.
+ */
+ blenp += crypto_shash_descsize(tfm) - 1;
+ bs = crypto_shash_blocksize(tfm);
+ buf = blenp - bs;
+
+ if (likely(!*blenp && finup_max))
+ goto finup;
+
+ while ((*blenp + len) >= bs + nonzero) {
+ unsigned int nbytes = len - nonzero;
+ const u8 *src = data;
+
+ if (*blenp) {
+ memcpy(buf + *blenp, data, bs - *blenp);
+ nbytes = bs;
+ src = buf;
+ }
+
+ err = crypto_shash_alg(tfm)->update(desc, src, nbytes);
+ if (err < 0)
+ return err;
+
+ data += nbytes - err - *blenp;
+ len -= nbytes - err - *blenp;
+ *blenp = 0;
+ }
+
+ if (*blenp || !out) {
+ memcpy(buf + *blenp, data, len);
+ *blenp += len;
+ if (!out)
+ return 0;
+ data = buf;
+ len = *blenp;
+ }
+
+finup:
+ return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->finup, desc,
+ data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
static int shash_default_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- struct shash_alg *shash = crypto_shash_alg(desc->tfm);
-
- return shash->init(desc) ?:
- shash->finup(desc, data, len, out);
+ return __crypto_shash_init(desc) ?:
+ crypto_shash_finup(desc, data, len, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -92,7 +188,8 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- return crypto_shash_alg(tfm)->digest(desc, data, len, out);
+ return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->digest, desc,
+ data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
@@ -100,44 +197,104 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out)
{
SHASH_DESC_ON_STACK(desc, tfm);
- int err;
desc->tfm = tfm;
+ return crypto_shash_digest(desc, data, len, out);
+}
+EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
- err = crypto_shash_digest(desc, data, len, out);
+static int __crypto_shash_export(struct shash_desc *desc, void *out,
+ int (*export)(struct shash_desc *desc,
+ void *out))
+{
+ struct crypto_shash *tfm = desc->tfm;
+ u8 *buf = shash_desc_ctx(desc);
+ unsigned int plen, ss;
+
+ plen = crypto_shash_blocksize(tfm) + 1;
+ ss = crypto_shash_statesize(tfm);
+ if (crypto_shash_block_only(tfm))
+ ss -= plen;
+ if (!export) {
+ memcpy(out, buf, ss);
+ return 0;
+ }
- shash_desc_zero(desc);
+ return export(desc, out);
+}
- return err;
+int crypto_shash_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_shash_export(desc, out,
+ crypto_shash_alg(desc->tfm)->export_core);
}
-EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
+EXPORT_SYMBOL_GPL(crypto_shash_export_core);
int crypto_shash_export(struct shash_desc *desc, void *out)
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- if (shash->export)
- return shash->export(desc, out);
+ if (crypto_shash_block_only(tfm)) {
+ unsigned int plen = crypto_shash_blocksize(tfm) + 1;
+ unsigned int descsize = crypto_shash_descsize(tfm);
+ unsigned int ss = crypto_shash_statesize(tfm);
+ u8 *buf = shash_desc_ctx(desc);
- memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm));
- return 0;
+ memcpy(out + ss - plen, buf + descsize - plen, plen);
+ }
+ return __crypto_shash_export(desc, out, crypto_shash_alg(tfm)->export);
}
EXPORT_SYMBOL_GPL(crypto_shash_export);
-int crypto_shash_import(struct shash_desc *desc, const void *in)
+static int __crypto_shash_import(struct shash_desc *desc, const void *in,
+ int (*import)(struct shash_desc *desc,
+ const void *in))
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
+ unsigned int descsize, plen, ss;
+ u8 *buf = shash_desc_ctx(desc);
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- if (shash->import)
- return shash->import(desc, in);
+ plen = crypto_shash_blocksize(tfm) + 1;
+ descsize = crypto_shash_descsize(tfm);
+ ss = crypto_shash_statesize(tfm);
+ buf[descsize - 1] = 0;
+ if (crypto_shash_block_only(tfm))
+ ss -= plen;
+ if (!import) {
+ memcpy(buf, in, ss);
+ return 0;
+ }
- memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
- return 0;
+ return import(desc, in);
+}
+
+int crypto_shash_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_shash_import(desc, in,
+ crypto_shash_alg(desc->tfm)->import_core);
+}
+EXPORT_SYMBOL_GPL(crypto_shash_import_core);
+
+int crypto_shash_import(struct shash_desc *desc, const void *in)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ int err;
+
+ err = __crypto_shash_import(desc, in, crypto_shash_alg(tfm)->import);
+ if (crypto_shash_block_only(tfm)) {
+ unsigned int plen = crypto_shash_blocksize(tfm) + 1;
+ unsigned int descsize = crypto_shash_descsize(tfm);
+ unsigned int ss = crypto_shash_statesize(tfm);
+ u8 *buf = shash_desc_ctx(desc);
+
+ memcpy(buf + descsize - plen, in + ss - plen, plen);
+ if (buf[descsize - 1] >= plen)
+ err = -EOVERFLOW;
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(crypto_shash_import);
@@ -153,9 +310,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
struct shash_alg *alg = crypto_shash_alg(hash);
- int err;
-
- hash->descsize = alg->descsize;
shash_set_needkey(hash, alg);
@@ -165,18 +319,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
if (!alg->init_tfm)
return 0;
- err = alg->init_tfm(hash);
- if (err)
- return err;
-
- /* ->init_tfm() may have increased the descsize. */
- if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) {
- if (alg->exit_tfm)
- alg->exit_tfm(hash);
- return -EINVAL;
- }
-
- return 0;
+ return alg->init_tfm(hash);
}
static void crypto_shash_free_instance(struct crypto_instance *inst)
@@ -227,6 +370,7 @@ const struct crypto_type crypto_shash_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
.tfmsize = offsetof(struct crypto_shash, base),
+ .algsize = offsetof(struct shash_alg, base),
};
int crypto_grab_shash(struct crypto_shash_spawn *spawn,
@@ -273,8 +417,6 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
if (IS_ERR(nhash))
return nhash;
- nhash->descsize = hash->descsize;
-
if (alg->clone_tfm) {
err = alg->clone_tfm(nhash, hash);
if (err) {
@@ -283,6 +425,9 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
}
}
+ if (alg->exit_tfm)
+ crypto_shash_tfm(nhash)->exit = crypto_shash_exit_tfm;
+
return nhash;
}
EXPORT_SYMBOL_GPL(crypto_clone_shash);
@@ -303,14 +448,21 @@ int hash_prepare_alg(struct hash_alg_common *alg)
return 0;
}
+static int shash_default_export_core(struct shash_desc *desc, void *out)
+{
+ return -ENOSYS;
+}
+
+static int shash_default_import_core(struct shash_desc *desc, const void *in)
+{
+ return -ENOSYS;
+}
+
static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
- if (alg->descsize > HASH_MAX_DESCSIZE)
- return -EINVAL;
-
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
@@ -320,6 +472,7 @@ static int shash_prepare_alg(struct shash_alg *alg)
base->cra_type = &crypto_shash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+ base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
/*
* Handle missing optional functions. For each one we can either
@@ -336,11 +489,30 @@ static int shash_prepare_alg(struct shash_alg *alg)
alg->finup = shash_default_finup;
if (!alg->digest)
alg->digest = shash_default_digest;
- if (!alg->export)
+ if (!alg->export && !alg->halg.statesize)
alg->halg.statesize = alg->descsize;
if (!alg->setkey)
alg->setkey = shash_no_setkey;
+ if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+ BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+ alg->descsize += base->cra_blocksize + 1;
+ alg->statesize += base->cra_blocksize + 1;
+ alg->export_core = alg->export;
+ alg->import_core = alg->import;
+ } else if (!alg->export_core || !alg->import_core) {
+ alg->export_core = shash_default_export_core;
+ alg->import_core = shash_default_import_core;
+ base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ }
+
+ if (alg->descsize > HASH_MAX_DESCSIZE)
+ return -EINVAL;
+ if (alg->statesize > HASH_MAX_STATESIZE)
+ return -EINVAL;
+
+ base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize;
+
return 0;
}
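To make the new layout concrete: for a CRYPTO_AHASH_ALG_BLOCK_ONLY algorithm, shash_prepare_alg() grows descsize by cra_blocksize + 1, so the descriptor context is the algorithm's own state, followed by a partial-block buffer, followed by a single length byte. The sketch below shows how those offsets are addressed, matching crypto_shash_finup() above; the helper name is hypothetical and the function is not part of the patch.

#include <crypto/internal/hash.h>

/* Sketch: locate the partial-block buffer of a BLOCK_ONLY shash descriptor. */
static void shash_block_only_layout(struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	unsigned int bs = crypto_shash_blocksize(tfm);
	u8 *ctx = shash_desc_ctx(desc);
	u8 *blenp = ctx + crypto_shash_descsize(tfm) - 1; /* buffered length */
	u8 *buf = blenp - bs;		/* up to bs - 1 buffered input bytes */

	/* *blenp stays below bs; a larger value on import means -EOVERFLOW */
	(void)buf;
}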
diff --git a/crypto/sig.c b/crypto/sig.c
index dfc7cae90802..beba745b6405 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -74,6 +74,7 @@ static const struct crypto_type crypto_sig_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
.tfmsize = offsetof(struct crypto_sig, base),
+ .algsize = offsetof(struct sig_alg, base),
};
struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask)
@@ -102,6 +103,11 @@ static int sig_default_set_key(struct crypto_sig *tfm,
return -ENOSYS;
}
+static unsigned int sig_default_size(struct crypto_sig *tfm)
+{
+ return DIV_ROUND_UP_POW2(crypto_sig_keysize(tfm), BITS_PER_BYTE);
+}
+
static int sig_prepare_alg(struct sig_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -117,9 +123,9 @@ static int sig_prepare_alg(struct sig_alg *alg)
if (!alg->key_size)
return -EINVAL;
if (!alg->max_size)
- alg->max_size = alg->key_size;
+ alg->max_size = sig_default_size;
if (!alg->digest_size)
- alg->digest_size = alg->key_size;
+ alg->digest_size = sig_default_size;
base->cra_type = &crypto_sig_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 132075a905d9..de5fc91bba26 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,7 +17,6 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -28,258 +27,14 @@
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
-enum {
- SKCIPHER_WALK_SLOW = 1 << 0,
- SKCIPHER_WALK_COPY = 1 << 1,
- SKCIPHER_WALK_DIFF = 1 << 2,
- SKCIPHER_WALK_SLEEP = 1 << 3,
-};
-
static const struct crypto_type crypto_skcipher_type;
-static int skcipher_walk_next(struct skcipher_walk *walk);
-
-static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
-{
- return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
-
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
return container_of(alg, struct skcipher_alg, base);
}
-/**
- * skcipher_walk_done() - finish one step of a skcipher_walk
- * @walk: the skcipher_walk
- * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
- * or a -errno value to terminate the walk due to an error
- *
- * This function cleans up after one step of walking through the source and
- * destination scatterlists, and advances to the next step if applicable.
- * walk->nbytes is set to the number of bytes available in the next step,
- * walk->total is set to the new total number of bytes remaining, and
- * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
- * is no more data, or if an error occurred (i.e. -errno return), then
- * walk->nbytes and walk->total are set to 0 and all resources owned by the
- * skcipher_walk are freed.
- *
- * Return: 0 or a -errno value. If @res was a -errno value then it will be
- * returned, but other errors may occur too.
- */
-int skcipher_walk_done(struct skcipher_walk *walk, int res)
-{
- unsigned int n = walk->nbytes; /* num bytes processed this step */
- unsigned int total = 0; /* new total remaining */
-
- if (!n)
- goto finish;
-
- if (likely(res >= 0)) {
- n -= res; /* subtract num bytes *not* processed */
- total = walk->total - n;
- }
-
- if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
- scatterwalk_advance(&walk->in, n);
- } else if (walk->flags & SKCIPHER_WALK_DIFF) {
- scatterwalk_done_src(&walk->in, n);
- } else if (walk->flags & SKCIPHER_WALK_COPY) {
- scatterwalk_advance(&walk->in, n);
- scatterwalk_map(&walk->out);
- memcpy(walk->out.addr, walk->page, n);
- } else { /* SKCIPHER_WALK_SLOW */
- if (res > 0) {
- /*
- * Didn't process all bytes. Either the algorithm is
- * broken, or this was the last step and it turned out
- * the message wasn't evenly divisible into blocks but
- * the algorithm requires it.
- */
- res = -EINVAL;
- total = 0;
- } else
- memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
- goto dst_done;
- }
-
- scatterwalk_done_dst(&walk->out, n);
-dst_done:
-
- if (res > 0)
- res = 0;
-
- walk->total = total;
- walk->nbytes = 0;
-
- if (total) {
- if (walk->flags & SKCIPHER_WALK_SLEEP)
- cond_resched();
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
- return skcipher_walk_next(walk);
- }
-
-finish:
- /* Short-circuit for the common/fast path. */
- if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
- goto out;
-
- if (walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-
-out:
- return res;
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_done);
-
-static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- unsigned alignmask = walk->alignmask;
- unsigned n;
- void *buffer;
-
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (!buffer) {
- /* Min size for a buffer of bsize bytes aligned to alignmask */
- n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- buffer = kzalloc(n, skcipher_walk_gfp(walk));
- if (!buffer)
- return skcipher_walk_done(walk, -ENOMEM);
- walk->buffer = buffer;
- }
-
- buffer = PTR_ALIGN(buffer, alignmask + 1);
- memcpy_from_scatterwalk(buffer, &walk->in, bsize);
- walk->out.__addr = buffer;
- walk->in.__addr = walk->out.addr;
-
- walk->nbytes = bsize;
- walk->flags |= SKCIPHER_WALK_SLOW;
-
- return 0;
-}
-
-static int skcipher_next_copy(struct skcipher_walk *walk)
-{
- void *tmp = walk->page;
-
- scatterwalk_map(&walk->in);
- memcpy(tmp, walk->in.addr, walk->nbytes);
- scatterwalk_unmap(&walk->in);
- /*
- * walk->in is advanced later when the number of bytes actually
- * processed (which might be less than walk->nbytes) is known.
- */
-
- walk->in.__addr = tmp;
- walk->out.__addr = tmp;
- return 0;
-}
-
-static int skcipher_next_fast(struct skcipher_walk *walk)
-{
- unsigned long diff;
-
- diff = offset_in_page(walk->in.offset) -
- offset_in_page(walk->out.offset);
- diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
- (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
-
- scatterwalk_map(&walk->out);
- walk->in.__addr = walk->out.__addr;
-
- if (diff) {
- walk->flags |= SKCIPHER_WALK_DIFF;
- scatterwalk_map(&walk->in);
- }
-
- return 0;
-}
-
-static int skcipher_walk_next(struct skcipher_walk *walk)
-{
- unsigned int bsize;
- unsigned int n;
-
- n = walk->total;
- bsize = min(walk->stride, max(n, walk->blocksize));
- n = scatterwalk_clamp(&walk->in, n);
- n = scatterwalk_clamp(&walk->out, n);
-
- if (unlikely(n < bsize)) {
- if (unlikely(walk->total < walk->blocksize))
- return skcipher_walk_done(walk, -EINVAL);
-
-slow_path:
- return skcipher_next_slow(walk, bsize);
- }
- walk->nbytes = n;
-
- if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
- if (!walk->page) {
- gfp_t gfp = skcipher_walk_gfp(walk);
-
- walk->page = (void *)__get_free_page(gfp);
- if (!walk->page)
- goto slow_path;
- }
- walk->flags |= SKCIPHER_WALK_COPY;
- return skcipher_next_copy(walk);
- }
-
- return skcipher_next_fast(walk);
-}
-
-static int skcipher_copy_iv(struct skcipher_walk *walk)
-{
- unsigned alignmask = walk->alignmask;
- unsigned ivsize = walk->ivsize;
- unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
- unsigned size;
- u8 *iv;
-
- /* Min size for a buffer of stride + ivsize, aligned to alignmask */
- size = aligned_stride + ivsize +
- (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
- if (!walk->buffer)
- return -ENOMEM;
-
- iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
-
- walk->iv = memcpy(iv, walk->iv, walk->ivsize);
- return 0;
-}
-
-static int skcipher_walk_first(struct skcipher_walk *walk)
-{
- if (WARN_ON_ONCE(in_hardirq()))
- return -EDEADLK;
-
- walk->buffer = NULL;
- if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
- int err = skcipher_copy_iv(walk);
- if (err)
- return err;
- }
-
- walk->page = NULL;
-
- return skcipher_walk_next(walk);
-}
-
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req, bool atomic)
{
@@ -294,10 +49,8 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
- walk->flags = SKCIPHER_WALK_SLEEP;
- else
- walk->flags = 0;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ atomic = true;
if (unlikely(!walk->total))
return 0;
@@ -314,7 +67,7 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
else
walk->stride = alg->walksize;
- return skcipher_walk_first(walk);
+ return skcipher_walk_first(walk, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
@@ -327,10 +80,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
- walk->flags = SKCIPHER_WALK_SLEEP;
- else
- walk->flags = 0;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ atomic = true;
if (unlikely(!walk->total))
return 0;
@@ -343,7 +94,7 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
walk->ivsize = crypto_aead_ivsize(tfm);
walk->alignmask = crypto_aead_alignmask(tfm);
- return skcipher_walk_first(walk);
+ return skcipher_walk_first(walk, atomic);
}
int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
@@ -620,6 +371,7 @@ static const struct crypto_type crypto_skcipher_type = {
.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
+ .algsize = offsetof(struct skcipher_alg, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
diff --git a/crypto/sm3.c b/crypto/sm3.c
deleted file mode 100644
index 18c2fb73ba16..000000000000
--- a/crypto/sm3.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
- * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
- *
- * Copyright (C) 2017 ARM Limited or its affiliates.
- * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
- * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#include <linux/module.h>
-#include <linux/unaligned.h>
-#include <crypto/sm3.h>
-
-static const u32 ____cacheline_aligned K[64] = {
- 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
- 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
- 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
- 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
- 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
- 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
- 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
- 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
- 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
- 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
- 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
- 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
- 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
- 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
- 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
- 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
-};
-
-/*
- * Transform the message X which consists of 16 32-bit-words. See
- * GM/T 004-2012 for details.
- */
-#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \
- do { \
- ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \
- ss2 = ss1 ^ rol32((a), 12); \
- d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \
- h += GG ## i(e, f, g) + ss1 + (w1); \
- b = rol32((b), 9); \
- f = rol32((f), 19); \
- h = P0((h)); \
- } while (0)
-
-#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
- R(1, a, b, c, d, e, f, g, h, t, w1, w2)
-#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
- R(2, a, b, c, d, e, f, g, h, t, w1, w2)
-
-#define FF1(x, y, z) (x ^ y ^ z)
-#define FF2(x, y, z) ((x & y) | (x & z) | (y & z))
-
-#define GG1(x, y, z) FF1(x, y, z)
-#define GG2(x, y, z) ((x & y) | (~x & z))
-
-/* Message expansion */
-#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
-#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
-#define I(i) (W[i] = get_unaligned_be32(data + i * 4))
-#define W1(i) (W[i & 0x0f])
-#define W2(i) (W[i & 0x0f] = \
- P1(W[i & 0x0f] \
- ^ W[(i-9) & 0x0f] \
- ^ rol32(W[(i-3) & 0x0f], 15)) \
- ^ rol32(W[(i-13) & 0x0f], 7) \
- ^ W[(i-6) & 0x0f])
-
-static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
-{
- u32 a, b, c, d, e, f, g, h, ss1, ss2;
-
- a = sctx->state[0];
- b = sctx->state[1];
- c = sctx->state[2];
- d = sctx->state[3];
- e = sctx->state[4];
- f = sctx->state[5];
- g = sctx->state[6];
- h = sctx->state[7];
-
- R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
- R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
- R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
- R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
- R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
- R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
- R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
- R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
- R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
- R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
- R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
- R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
- R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
- R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
- R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
- R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));
-
- R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
- R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
- R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
- R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
- R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
- R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
- R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
- R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
- R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
- R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
- R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
- R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
- R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
- R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
- R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
- R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));
-
- R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
- R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
- R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
- R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
- R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
- R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
- R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
- R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
- R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
- R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
- R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
- R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
- R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
- R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
- R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
- R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));
-
- R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
- R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
- R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
- R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
- R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
- R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
- R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
- R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
- R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
- R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
- R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
- R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
- R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
- R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
- R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
- R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));
-
- sctx->state[0] ^= a;
- sctx->state[1] ^= b;
- sctx->state[2] ^= c;
- sctx->state[3] ^= d;
- sctx->state[4] ^= e;
- sctx->state[5] ^= f;
- sctx->state[6] ^= g;
- sctx->state[7] ^= h;
-}
-#undef R
-#undef R1
-#undef R2
-#undef I
-#undef W1
-#undef W2
-
-static inline void sm3_block(struct sm3_state *sctx,
- u8 const *data, int blocks, u32 W[16])
-{
- while (blocks--) {
- sm3_transform(sctx, data, W);
- data += SM3_BLOCK_SIZE;
- }
-}
-
-void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len)
-{
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
- u32 W[16];
-
- sctx->count += len;
-
- if ((partial + len) >= SM3_BLOCK_SIZE) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- sm3_block(sctx, sctx->buffer, 1, W);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- sm3_block(sctx, data, blocks, W);
- data += blocks * SM3_BLOCK_SIZE;
- }
-
- memzero_explicit(W, sizeof(W));
-
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-}
-EXPORT_SYMBOL_GPL(sm3_update);
-
-void sm3_final(struct sm3_state *sctx, u8 *out)
-{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- __be32 *digest = (__be32 *)out;
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
- u32 W[16];
- int i;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial);
- partial = 0;
-
- sm3_block(sctx, sctx->buffer, 1, W);
- }
-
- memset(sctx->buffer + partial, 0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- sm3_block(sctx, sctx->buffer, 1, W);
-
- for (i = 0; i < 8; i++)
- put_unaligned_be32(sctx->state[i], digest++);
-
- /* Zeroize sensitive information. */
- memzero_explicit(W, sizeof(W));
- memzero_explicit(sctx, sizeof(*sctx));
-}
-EXPORT_SYMBOL_GPL(sm3_final);
-
-MODULE_DESCRIPTION("Generic SM3 library");
-MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index a2d23a46924e..7529139fcc96 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -9,15 +9,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
@@ -30,38 +25,28 @@ EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
static int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- sm3_update(shash_desc_ctx(desc), data, len);
- return 0;
-}
-
-static int crypto_sm3_final(struct shash_desc *desc, u8 *out)
-{
- sm3_final(shash_desc_ctx(desc), out);
- return 0;
+ return sm3_base_do_update_blocks(desc, data, len, sm3_block_generic);
}
static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- if (len)
- sm3_update(sctx, data, len);
- sm3_final(sctx, hash);
- return 0;
+ sm3_base_do_finup(desc, data, len, sm3_block_generic);
+ return sm3_base_finish(desc, hash);
}
static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = crypto_sm3_update,
- .final = crypto_sm3_final,
.finup = crypto_sm3_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -77,7 +62,7 @@ static void __exit sm3_generic_mod_fini(void)
crypto_unregister_shash(&sm3_alg);
}
-subsys_initcall(sm3_generic_mod_init);
+module_init(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index 7df86369ac00..d57444e8428c 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -83,7 +83,7 @@ static void __exit sm4_fini(void)
crypto_unregister_alg(&sm4_alg);
}
-subsys_initcall(sm4_init);
+module_init(sm4_init);
module_exit(sm4_fini);
MODULE_DESCRIPTION("SM4 Cipher Algorithm");
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index dc625ffc54ad..57bbf70f4c22 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -13,9 +13,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
#include <crypto/streebog.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
static const struct streebog_uint512 buffer0 = { {
0, 0, 0, 0, 0, 0, 0, 0
@@ -919,17 +920,6 @@ static int streebog_init(struct shash_desc *desc)
return 0;
}
-static void streebog_pad(struct streebog_state *ctx)
-{
- if (ctx->fillsize >= STREEBOG_BLOCK_SIZE)
- return;
-
- memset(ctx->buffer + ctx->fillsize, 0,
- sizeof(ctx->buffer) - ctx->fillsize);
-
- ctx->buffer[ctx->fillsize] = 1;
-}
-
static void streebog_add512(const struct streebog_uint512 *x,
const struct streebog_uint512 *y,
struct streebog_uint512 *r)
@@ -984,16 +974,23 @@ static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
}
-static void streebog_stage3(struct streebog_state *ctx)
+static void streebog_stage3(struct streebog_state *ctx, const u8 *src,
+ unsigned int len)
{
struct streebog_uint512 buf = { { 0 } };
+ union {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 m;
+ } u = {};
- buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
- streebog_pad(ctx);
+ buf.qword[0] = cpu_to_le64(len << 3);
+ memcpy(u.buffer, src, len);
+ u.buffer[len] = 1;
- streebog_g(&ctx->h, &ctx->N, &ctx->m);
+ streebog_g(&ctx->h, &ctx->N, &u.m);
streebog_add512(&ctx->N, &buf, &ctx->N);
- streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
+ streebog_add512(&ctx->Sigma, &u.m, &ctx->Sigma);
+ memzero_explicit(&u, sizeof(u));
streebog_g(&ctx->h, &buffer0, &ctx->N);
streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
@@ -1003,42 +1000,22 @@ static int streebog_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
- size_t chunksize;
- if (ctx->fillsize) {
- chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize;
- if (chunksize > len)
- chunksize = len;
- memcpy(&ctx->buffer[ctx->fillsize], data, chunksize);
- ctx->fillsize += chunksize;
- len -= chunksize;
- data += chunksize;
-
- if (ctx->fillsize == STREEBOG_BLOCK_SIZE) {
- streebog_stage2(ctx, ctx->buffer);
- ctx->fillsize = 0;
- }
- }
-
- while (len >= STREEBOG_BLOCK_SIZE) {
+ do {
streebog_stage2(ctx, data);
data += STREEBOG_BLOCK_SIZE;
len -= STREEBOG_BLOCK_SIZE;
- }
+ } while (len >= STREEBOG_BLOCK_SIZE);
- if (len) {
- memcpy(&ctx->buffer, data, len);
- ctx->fillsize = len;
- }
- return 0;
+ return len;
}
-static int streebog_final(struct shash_desc *desc, u8 *digest)
+static int streebog_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *digest)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
- streebog_stage3(ctx);
- ctx->fillsize = 0;
+ streebog_stage3(ctx, src, len);
if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE)
memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE);
else
@@ -1050,11 +1027,12 @@ static struct shash_alg algs[2] = { {
.digestsize = STREEBOG256_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
- .final = streebog_final,
+ .finup = streebog_finup,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog256",
.cra_driver_name = "streebog256-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
@@ -1062,11 +1040,12 @@ static struct shash_alg algs[2] = { {
.digestsize = STREEBOG512_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
- .final = streebog_final,
+ .finup = streebog_finup,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog512",
.cra_driver_name = "streebog512-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1082,7 +1061,7 @@ static void __exit streebog_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-subsys_initcall(streebog_mod_init);
+module_init(streebog_mod_init);
module_exit(streebog_mod_fini);
MODULE_LICENSE("GPL");
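With CRYPTO_AHASH_ALG_BLOCK_ONLY, ->update() no longer buffers partial input itself: it consumes whole blocks and returns the number of trailing bytes it left unprocessed, which the shash core carries over to the next call, as streebog_update() now does. A minimal sketch of that contract, assuming a hypothetical 64-byte block size; not part of the patch.

/* Sketch of the BLOCK_ONLY ->update() contract (hypothetical algorithm). */
static int example_block_only_update(struct shash_desc *desc, const u8 *data,
				     unsigned int len)
{
	const unsigned int bs = 64;	/* hypothetical block size */

	while (len >= bs) {
		/* compress one full block starting at data ... */
		data += bs;
		len -= bs;
	}
	return len;	/* leftover bytes; the core buffers them for later */
}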
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 96f4a66be14c..d1d88debbd71 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Quick & dirty crypto testing module.
+ * Quick & dirty crypto benchmarking module.
*
- * This will only exist until we have a better testing mechanism
+ * This will only exist until we have a better benchmarking mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
@@ -39,7 +39,7 @@
#include "tcrypt.h"
/*
- * Need slab memory for testing (size in number of pages).
+ * Need slab memory for benchmarking (size in number of pages).
*/
#define TVMEMSIZE 4
@@ -716,207 +716,6 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
return crypto_wait_req(ret, wait);
}
-struct test_mb_ahash_data {
- struct scatterlist sg[XBUFSIZE];
- char result[64];
- struct ahash_request *req;
- struct crypto_wait wait;
- char *xbuf[XBUFSIZE];
-};
-
-static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb,
- int *rc)
-{
- int i, err;
-
- /* Fire up a bunch of concurrent requests */
- err = crypto_ahash_digest(data[0].req);
-
- /* Wait for all requests to finish */
- err = crypto_wait_req(err, &data[0].wait);
- if (num_mb < 2)
- return err;
-
- for (i = 0; i < num_mb; i++) {
- rc[i] = ahash_request_err(data[i].req);
- if (rc[i]) {
- pr_info("concurrent request %d error %d\n", i, rc[i]);
- err = rc[i];
- }
- }
-
- return err;
-}
-
-static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
- int secs, u32 num_mb)
-{
- unsigned long start, end;
- int bcount;
- int ret = 0;
- int *rc;
-
- rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
-
- for (start = jiffies, end = start + secs * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- ret = do_mult_ahash_op(data, num_mb, rc);
- if (ret)
- goto out;
- }
-
- pr_cont("%d operations in %d seconds (%llu bytes)\n",
- bcount * num_mb, secs, (u64)bcount * blen * num_mb);
-
-out:
- kfree(rc);
- return ret;
-}
-
-static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
- u32 num_mb)
-{
- unsigned long cycles = 0;
- int ret = 0;
- int i;
- int *rc;
-
- rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
-
- /* Warm-up run. */
- for (i = 0; i < 4; i++) {
- ret = do_mult_ahash_op(data, num_mb, rc);
- if (ret)
- goto out;
- }
-
- /* The real thing. */
- for (i = 0; i < 8; i++) {
- cycles_t start, end;
-
- start = get_cycles();
- ret = do_mult_ahash_op(data, num_mb, rc);
- end = get_cycles();
-
- if (ret)
- goto out;
-
- cycles += end - start;
- }
-
- pr_cont("1 operation in %lu cycles (%d bytes)\n",
- (cycles + 4) / (8 * num_mb), blen);
-
-out:
- kfree(rc);
- return ret;
-}
-
-static void test_mb_ahash_speed(const char *algo, unsigned int secs,
- struct hash_speed *speed, u32 num_mb)
-{
- struct test_mb_ahash_data *data;
- struct crypto_ahash *tfm;
- unsigned int i, j, k;
- int ret;
-
- data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
- if (!data)
- return;
-
- tfm = crypto_alloc_ahash(algo, 0, 0);
- if (IS_ERR(tfm)) {
- pr_err("failed to load transform for %s: %ld\n",
- algo, PTR_ERR(tfm));
- goto free_data;
- }
-
- for (i = 0; i < num_mb; ++i) {
- if (testmgr_alloc_buf(data[i].xbuf))
- goto out;
-
- crypto_init_wait(&data[i].wait);
-
- data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!data[i].req) {
- pr_err("alg: hash: Failed to allocate request for %s\n",
- algo);
- goto out;
- }
-
-
- if (i) {
- ahash_request_set_callback(data[i].req, 0, NULL, NULL);
- ahash_request_chain(data[i].req, data[0].req);
- } else
- ahash_request_set_callback(data[0].req, 0,
- crypto_req_done,
- &data[0].wait);
-
- sg_init_table(data[i].sg, XBUFSIZE);
- for (j = 0; j < XBUFSIZE; j++) {
- sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
- memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
- }
- }
-
- pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
- get_driver_name(crypto_ahash, tfm));
-
- for (i = 0; speed[i].blen != 0; i++) {
- /* For some reason this only tests digests. */
- if (speed[i].blen != speed[i].plen)
- continue;
-
- if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
- pr_err("template (%u) too big for tvmem (%lu)\n",
- speed[i].blen, XBUFSIZE * PAGE_SIZE);
- goto out;
- }
-
- if (klen)
- crypto_ahash_setkey(tfm, tvmem[0], klen);
-
- for (k = 0; k < num_mb; k++)
- ahash_request_set_crypt(data[k].req, data[k].sg,
- data[k].result, speed[i].blen);
-
- pr_info("test%3u "
- "(%5u byte blocks,%5u bytes per update,%4u updates): ",
- i, speed[i].blen, speed[i].plen,
- speed[i].blen / speed[i].plen);
-
- if (secs) {
- ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
- num_mb);
- cond_resched();
- } else {
- ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
- }
-
-
- if (ret) {
- pr_err("At least one hashing failed ret=%d\n", ret);
- break;
- }
- }
-
-out:
- ahash_request_free(data[0].req);
-
- for (k = 0; k < num_mb; ++k)
- testmgr_free_buf(data[k].xbuf);
-
- crypto_free_ahash(tfm);
-
-free_data:
- kfree(data);
-}
-
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
char *out, int secs)
{
@@ -2584,36 +2383,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_ahash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
- case 450:
- test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 451:
- test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 452:
- test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 453:
- test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 454:
- test_mb_ahash_speed("streebog256", sec,
- generic_hash_speed_template, num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 455:
- test_mb_ahash_speed("streebog512", sec,
- generic_hash_speed_template, num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
case 499:
break;
@@ -3099,5 +2868,5 @@ module_param(klen, uint, 0);
MODULE_PARM_DESC(klen, "Key length (defaults to 0)");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Quick & dirty crypto testing module");
+MODULE_DESCRIPTION("Quick & dirty crypto benchmarking module");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 96c843a24607..7f938ac93e58 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Quick & dirty crypto testing module.
+ * Quick & dirty crypto benchmarking module.
*
- * This will only exist until we have a better testing mechanism
+ * This will only exist until we have a better benchmarking mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
diff --git a/crypto/tea.c b/crypto/tea.c
index b315da8c89eb..cb05140e3470 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -255,7 +255,7 @@ MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
-subsys_initcall(tea_mod_init);
+module_init(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index abd609d4c8ef..72005074a5c2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -43,25 +43,17 @@ MODULE_IMPORT_NS("CRYPTO_INTERNAL");
static bool notests;
module_param(notests, bool, 0644);
-MODULE_PARM_DESC(notests, "disable crypto self-tests");
+MODULE_PARM_DESC(notests, "disable all crypto self-tests");
-static bool panic_on_fail;
-module_param(panic_on_fail, bool, 0444);
-
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-static bool noextratests;
-module_param(noextratests, bool, 0644);
-MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
+static bool noslowtests;
+module_param(noslowtests, bool, 0644);
+MODULE_PARM_DESC(noslowtests, "disable slow crypto self-tests");
static unsigned int fuzz_iterations = 100;
module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
-#endif
-
-/* Multibuffer is unlimited. Set arbitrary limit for testing. */
-#define MAX_MB_MSGS 16
-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+#ifndef CONFIG_CRYPTO_SELFTESTS
/* a perfect nop */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -327,10 +319,9 @@ struct testvec_config {
/*
* The following are the lists of testvec_configs to test for each algorithm
- * type when the basic crypto self-tests are enabled, i.e. when
- * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test
- * coverage, while keeping the test time much shorter than the full fuzz tests
- * so that the basic tests can be enabled in a wider range of circumstances.
+ * type when the fast crypto self-tests are enabled. They aim to provide good
+ * test coverage, while keeping the test time much shorter than the full tests
+ * so that the fast tests can be used to fulfill FIPS 140 testing requirements.
*/
/* Configs for skciphers and aeads */
@@ -879,8 +870,6 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
err; \
})
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-
/*
* The fuzz tests use prandom instead of the normal Linux RNG since they don't
* need cryptographically secure random numbers. This greatly improves the
@@ -1245,15 +1234,6 @@ too_long:
algname);
return -ENAMETOOLONG;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static void crypto_disable_simd_for_test(void)
-{
-}
-
-static void crypto_reenable_simd_for_test(void)
-{
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int build_hash_sglist(struct test_sglist *tsgl,
const struct hash_testvec *vec,
@@ -1694,8 +1674,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -1712,17 +1691,15 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a hash test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
static void generate_random_hash_testvec(struct rnd_state *rng,
- struct shash_desc *desc,
+ struct ahash_request *req,
struct hash_testvec *vec,
unsigned int maxkeysize,
unsigned int maxdatasize,
@@ -1744,16 +1721,17 @@ static void generate_random_hash_testvec(struct rnd_state *rng,
vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
- vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
- vec->ksize);
+ vec->setkey_error = crypto_ahash_setkey(
+ crypto_ahash_reqtfm(req), vec->key, vec->ksize);
/* If the key couldn't be set, no need to continue to digest. */
if (vec->setkey_error)
goto done;
}
/* Digest */
- vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
- vec->psize, (u8 *)vec->digest);
+ vec->digest_error = crypto_hash_digest(
+ crypto_ahash_reqtfm(req), vec->plaintext,
+ vec->psize, (u8 *)vec->digest);
done:
snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
vec->psize, vec->ksize);
@@ -1778,8 +1756,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
const char *driver = crypto_ahash_driver_name(tfm);
struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
- struct crypto_shash *generic_tfm = NULL;
- struct shash_desc *generic_desc = NULL;
+ struct ahash_request *generic_req = NULL;
+ struct crypto_ahash *generic_tfm = NULL;
unsigned int i;
struct hash_testvec vec = { 0 };
char vec_name[64];
@@ -1787,7 +1765,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
init_rnd_state(&rng);
@@ -1802,7 +1780,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
return 0;
- generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
+ generic_tfm = crypto_alloc_ahash(generic_driver, 0, 0);
if (IS_ERR(generic_tfm)) {
err = PTR_ERR(generic_tfm);
if (err == -ENOENT) {
@@ -1821,27 +1799,25 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
goto out;
}
- generic_desc = kzalloc(sizeof(*desc) +
- crypto_shash_descsize(generic_tfm), GFP_KERNEL);
- if (!generic_desc) {
+ generic_req = ahash_request_alloc(generic_tfm, GFP_KERNEL);
+ if (!generic_req) {
err = -ENOMEM;
goto out;
}
- generic_desc->tfm = generic_tfm;
/* Check the algorithm properties for consistency. */
- if (digestsize != crypto_shash_digestsize(generic_tfm)) {
+ if (digestsize != crypto_ahash_digestsize(generic_tfm)) {
pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
driver, digestsize,
- crypto_shash_digestsize(generic_tfm));
+ crypto_ahash_digestsize(generic_tfm));
err = -EINVAL;
goto out;
}
- if (blocksize != crypto_shash_blocksize(generic_tfm)) {
+ if (blocksize != crypto_ahash_blocksize(generic_tfm)) {
pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
- driver, blocksize, crypto_shash_blocksize(generic_tfm));
+ driver, blocksize, crypto_ahash_blocksize(generic_tfm));
err = -EINVAL;
goto out;
}
@@ -1860,7 +1836,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_hash_testvec(&rng, generic_desc, &vec,
+ generate_random_hash_testvec(&rng, generic_req, &vec,
maxkeysize, maxdatasize,
vec_name, sizeof(vec_name));
generate_random_testvec_config(&rng, cfg, cfgname,
@@ -1878,21 +1854,10 @@ out:
kfree(vec.key);
kfree(vec.plaintext);
kfree(vec.digest);
- crypto_free_shash(generic_tfm);
- kfree_sensitive(generic_desc);
+ ahash_request_free(generic_req);
+ crypto_free_ahash(generic_tfm);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_hash_vs_generic_impl(const char *generic_driver,
- unsigned int maxkeysize,
- struct ahash_request *req,
- struct shash_desc *desc,
- struct test_sglist *tsgl,
- u8 *hashstate)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int alloc_shash(const char *driver, u32 type, u32 mask,
struct crypto_shash **tfm_ret,
@@ -1903,7 +1868,7 @@ static int alloc_shash(const char *driver, u32 type, u32 mask,
tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
- if (PTR_ERR(tfm) == -ENOENT) {
+ if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) {
/*
* This algorithm is only available through the ahash
* API, not the shash API, so skip the shash tests.
@@ -2266,8 +2231,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -2284,13 +2248,10 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-
-struct aead_extra_tests_ctx {
+struct aead_slow_tests_ctx {
struct rnd_state rng;
struct aead_request *req;
struct crypto_aead *tfm;
@@ -2465,8 +2426,7 @@ static void generate_random_aead_testvec(struct rnd_state *rng,
vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
}
-static void try_to_generate_inauthentic_testvec(
- struct aead_extra_tests_ctx *ctx)
+static void try_to_generate_inauthentic_testvec(struct aead_slow_tests_ctx *ctx)
{
int i;
@@ -2485,7 +2445,7 @@ static void try_to_generate_inauthentic_testvec(
* Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the
* result of an encryption with the key) and verify that decryption fails.
*/
-static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
+static int test_aead_inauthentic_inputs(struct aead_slow_tests_ctx *ctx)
{
unsigned int i;
int err;
@@ -2520,7 +2480,7 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
* Test the AEAD algorithm against the corresponding generic implementation, if
* one is available.
*/
-static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
+static int test_aead_vs_generic_impl(struct aead_slow_tests_ctx *ctx)
{
struct crypto_aead *tfm = ctx->tfm;
const char *algname = crypto_aead_alg(tfm)->base.cra_name;
@@ -2624,15 +2584,15 @@ out:
return err;
}
-static int test_aead_extra(const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
+static int test_aead_slow(const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
{
- struct aead_extra_tests_ctx *ctx;
+ struct aead_slow_tests_ctx *ctx;
unsigned int i;
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -2674,14 +2634,6 @@ out:
kfree(ctx);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_aead_extra(const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_aead(int enc, const struct aead_test_suite *suite,
struct aead_request *req,
@@ -2747,7 +2699,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
if (err)
goto out;
- err = test_aead_extra(desc, req, tsgls);
+ err = test_aead_slow(desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
@@ -3021,8 +2973,7 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -3039,11 +2990,9 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a symmetric cipher test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
@@ -3126,7 +3075,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
init_rnd_state(&rng);
@@ -3242,14 +3191,6 @@ out:
skcipher_request_free(generic_req);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_skcipher_vs_generic_impl(const char *generic_driver,
- struct skcipher_request *req,
- struct cipher_test_sglists *tsgls)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_skcipher(int enc, const struct cipher_test_suite *suite,
struct skcipher_request *req,
@@ -3329,48 +3270,27 @@ static int test_acomp(struct crypto_acomp *tfm,
int ctcount, int dtcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
- struct scatterlist *src = NULL, *dst = NULL;
- struct acomp_req *reqs[MAX_MB_MSGS] = {};
- char *decomp_out[MAX_MB_MSGS] = {};
- char *output[MAX_MB_MSGS] = {};
- struct crypto_wait wait;
- struct acomp_req *req;
- int ret = -ENOMEM;
unsigned int i;
+ char *output, *decomp_out;
+ int ret;
+ struct scatterlist src, dst;
+ struct acomp_req *req;
+ struct crypto_wait wait;
- src = kmalloc_array(MAX_MB_MSGS, sizeof(*src), GFP_KERNEL);
- if (!src)
- goto out;
- dst = kmalloc_array(MAX_MB_MSGS, sizeof(*dst), GFP_KERNEL);
- if (!dst)
- goto out;
-
- for (i = 0; i < MAX_MB_MSGS; i++) {
- reqs[i] = acomp_request_alloc(tfm);
- if (!reqs[i])
- goto out;
-
- acomp_request_set_callback(reqs[i],
- CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
- if (i)
- acomp_request_chain(reqs[i], reqs[0]);
-
- output[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!output[i])
- goto out;
+ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
- decomp_out[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!decomp_out[i])
- goto out;
+ decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!decomp_out) {
+ kfree(output);
+ return -ENOMEM;
}
for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen;
void *input_vec;
- int j;
input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
if (!input_vec) {
@@ -3378,61 +3298,70 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+ memset(output, 0, dlen);
crypto_init_wait(&wait);
- sg_init_one(src, input_vec, ilen);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- sg_init_one(dst + j, output[j], dlen);
- acomp_request_set_params(reqs[j], src, dst + j, ilen, dlen);
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ kfree(input_vec);
+ ret = -ENOMEM;
+ goto out;
}
- req = reqs[0];
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
+ acomp_request_free(req);
goto out;
}
ilen = req->dlen;
dlen = COMP_BUF_SIZE;
+ sg_init_one(&src, output, ilen);
+ sg_init_one(&dst, decomp_out, dlen);
crypto_init_wait(&wait);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- sg_init_one(src + j, output[j], ilen);
- sg_init_one(dst + j, decomp_out[j], dlen);
- acomp_request_set_params(reqs[j], src + j, dst + j, ilen, dlen);
- }
-
- crypto_wait_req(crypto_acomp_decompress(req), &wait);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- ret = reqs[j]->base.err;
- if (ret) {
- pr_err("alg: acomp: compression failed on test %d (%d) for %s: ret=%d\n",
- i + 1, j, algo, -ret);
- kfree(input_vec);
- goto out;
- }
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
- if (reqs[j]->dlen != ctemplate[i].inlen) {
- pr_err("alg: acomp: Compression test %d (%d) failed for %s: output len = %d\n",
- i + 1, j, algo, reqs[j]->dlen);
- ret = -EINVAL;
- kfree(input_vec);
- goto out;
- }
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
+ if (ret) {
+			pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
- if (memcmp(input_vec, decomp_out[j], reqs[j]->dlen)) {
- pr_err("alg: acomp: Compression test %d (%d) failed for %s\n",
- i + 1, j, algo);
- hexdump(output[j], reqs[j]->dlen);
- ret = -EINVAL;
- kfree(input_vec);
- goto out;
- }
+ if (req->dlen != ctemplate[i].inlen) {
+ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(input_vec, decomp_out, req->dlen)) {
+ pr_err("alg: acomp: Compression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
}
kfree(input_vec);
+ acomp_request_free(req);
}
for (i = 0; i < dtcount; i++) {
@@ -3446,9 +3375,10 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+ memset(output, 0, dlen);
crypto_init_wait(&wait);
- sg_init_one(src, input_vec, ilen);
- sg_init_one(dst, output[0], dlen);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm);
if (!req) {
@@ -3459,7 +3389,7 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
- acomp_request_set_params(req, src, dst, ilen, dlen);
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
@@ -3481,10 +3411,10 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
- if (memcmp(output[0], dtemplate[i].output, req->dlen)) {
+ if (memcmp(output, dtemplate[i].output, req->dlen)) {
pr_err("alg: acomp: Decompression test %d failed for %s\n",
i + 1, algo);
- hexdump(output[0], req->dlen);
+ hexdump(output, req->dlen);
ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req);
@@ -3498,13 +3428,8 @@ static int test_acomp(struct crypto_acomp *tfm,
ret = 0;
out:
- acomp_request_free(reqs[0]);
- for (i = 0; i < MAX_MB_MSGS; i++) {
- kfree(output[i]);
- kfree(decomp_out[i]);
- }
- kfree(dst);
- kfree(src);
+ kfree(decomp_out);
+ kfree(output);
return ret;
}
@@ -5426,12 +5351,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
- .alg = "poly1305",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(poly1305_tv_template)
- }
- }, {
.alg = "polyval",
.test = alg_test_hash,
.suite = {
@@ -5788,9 +5707,8 @@ static void testmgr_onetime_init(void)
alg_check_test_descs_order();
alg_check_testvec_configs();
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n");
-#endif
+ if (!noslowtests)
+ pr_warn("alg: full crypto tests enabled. This is intended for developer use only.\n");
}
static int alg_find_test(const char *alg)
@@ -5879,11 +5797,10 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
test_done:
if (rc) {
- if (fips_enabled || panic_on_fail) {
+ if (fips_enabled) {
fips_fail_notify();
- panic("alg: self-tests for %s (%s) failed in %s mode!\n",
- driver, alg,
- fips_enabled ? "fips" : "panic_on_fail");
+ panic("alg: self-tests for %s (%s) failed in fips mode!\n",
+ driver, alg);
}
pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
@@ -5928,6 +5845,6 @@ non_fips_alg:
return alg_fips_disabled(driver, alg);
}
-#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
+#endif /* CONFIG_CRYPTO_SELFTESTS */
EXPORT_SYMBOL_GPL(alg_test);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index afc10af59b0a..32d099ac9e73 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -8836,294 +8836,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = {
},
};
-/*
- * Poly1305 test vectors from RFC7539 A.3.
- */
-
-static const struct hash_testvec poly1305_tv_template[] = {
- { /* Test Vector #1 */
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 96,
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #2 */
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e"
- "\x41\x6e\x79\x20\x73\x75\x62\x6d"
- "\x69\x73\x73\x69\x6f\x6e\x20\x74"
- "\x6f\x20\x74\x68\x65\x20\x49\x45"
- "\x54\x46\x20\x69\x6e\x74\x65\x6e"
- "\x64\x65\x64\x20\x62\x79\x20\x74"
- "\x68\x65\x20\x43\x6f\x6e\x74\x72"
- "\x69\x62\x75\x74\x6f\x72\x20\x66"
- "\x6f\x72\x20\x70\x75\x62\x6c\x69"
- "\x63\x61\x74\x69\x6f\x6e\x20\x61"
- "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
- "\x20\x70\x61\x72\x74\x20\x6f\x66"
- "\x20\x61\x6e\x20\x49\x45\x54\x46"
- "\x20\x49\x6e\x74\x65\x72\x6e\x65"
- "\x74\x2d\x44\x72\x61\x66\x74\x20"
- "\x6f\x72\x20\x52\x46\x43\x20\x61"
- "\x6e\x64\x20\x61\x6e\x79\x20\x73"
- "\x74\x61\x74\x65\x6d\x65\x6e\x74"
- "\x20\x6d\x61\x64\x65\x20\x77\x69"
- "\x74\x68\x69\x6e\x20\x74\x68\x65"
- "\x20\x63\x6f\x6e\x74\x65\x78\x74"
- "\x20\x6f\x66\x20\x61\x6e\x20\x49"
- "\x45\x54\x46\x20\x61\x63\x74\x69"
- "\x76\x69\x74\x79\x20\x69\x73\x20"
- "\x63\x6f\x6e\x73\x69\x64\x65\x72"
- "\x65\x64\x20\x61\x6e\x20\x22\x49"
- "\x45\x54\x46\x20\x43\x6f\x6e\x74"
- "\x72\x69\x62\x75\x74\x69\x6f\x6e"
- "\x22\x2e\x20\x53\x75\x63\x68\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x63\x6c\x75"
- "\x64\x65\x20\x6f\x72\x61\x6c\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x20\x49\x45"
- "\x54\x46\x20\x73\x65\x73\x73\x69"
- "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
- "\x77\x65\x6c\x6c\x20\x61\x73\x20"
- "\x77\x72\x69\x74\x74\x65\x6e\x20"
- "\x61\x6e\x64\x20\x65\x6c\x65\x63"
- "\x74\x72\x6f\x6e\x69\x63\x20\x63"
- "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
- "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
- "\x64\x65\x20\x61\x74\x20\x61\x6e"
- "\x79\x20\x74\x69\x6d\x65\x20\x6f"
- "\x72\x20\x70\x6c\x61\x63\x65\x2c"
- "\x20\x77\x68\x69\x63\x68\x20\x61"
- "\x72\x65\x20\x61\x64\x64\x72\x65"
- "\x73\x73\x65\x64\x20\x74\x6f",
- .psize = 407,
- .digest = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e",
- }, { /* Test Vector #3 */
- .plaintext = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x41\x6e\x79\x20\x73\x75\x62\x6d"
- "\x69\x73\x73\x69\x6f\x6e\x20\x74"
- "\x6f\x20\x74\x68\x65\x20\x49\x45"
- "\x54\x46\x20\x69\x6e\x74\x65\x6e"
- "\x64\x65\x64\x20\x62\x79\x20\x74"
- "\x68\x65\x20\x43\x6f\x6e\x74\x72"
- "\x69\x62\x75\x74\x6f\x72\x20\x66"
- "\x6f\x72\x20\x70\x75\x62\x6c\x69"
- "\x63\x61\x74\x69\x6f\x6e\x20\x61"
- "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
- "\x20\x70\x61\x72\x74\x20\x6f\x66"
- "\x20\x61\x6e\x20\x49\x45\x54\x46"
- "\x20\x49\x6e\x74\x65\x72\x6e\x65"
- "\x74\x2d\x44\x72\x61\x66\x74\x20"
- "\x6f\x72\x20\x52\x46\x43\x20\x61"
- "\x6e\x64\x20\x61\x6e\x79\x20\x73"
- "\x74\x61\x74\x65\x6d\x65\x6e\x74"
- "\x20\x6d\x61\x64\x65\x20\x77\x69"
- "\x74\x68\x69\x6e\x20\x74\x68\x65"
- "\x20\x63\x6f\x6e\x74\x65\x78\x74"
- "\x20\x6f\x66\x20\x61\x6e\x20\x49"
- "\x45\x54\x46\x20\x61\x63\x74\x69"
- "\x76\x69\x74\x79\x20\x69\x73\x20"
- "\x63\x6f\x6e\x73\x69\x64\x65\x72"
- "\x65\x64\x20\x61\x6e\x20\x22\x49"
- "\x45\x54\x46\x20\x43\x6f\x6e\x74"
- "\x72\x69\x62\x75\x74\x69\x6f\x6e"
- "\x22\x2e\x20\x53\x75\x63\x68\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x63\x6c\x75"
- "\x64\x65\x20\x6f\x72\x61\x6c\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x20\x49\x45"
- "\x54\x46\x20\x73\x65\x73\x73\x69"
- "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
- "\x77\x65\x6c\x6c\x20\x61\x73\x20"
- "\x77\x72\x69\x74\x74\x65\x6e\x20"
- "\x61\x6e\x64\x20\x65\x6c\x65\x63"
- "\x74\x72\x6f\x6e\x69\x63\x20\x63"
- "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
- "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
- "\x64\x65\x20\x61\x74\x20\x61\x6e"
- "\x79\x20\x74\x69\x6d\x65\x20\x6f"
- "\x72\x20\x70\x6c\x61\x63\x65\x2c"
- "\x20\x77\x68\x69\x63\x68\x20\x61"
- "\x72\x65\x20\x61\x64\x64\x72\x65"
- "\x73\x73\x65\x64\x20\x74\x6f",
- .psize = 407,
- .digest = "\xf3\x47\x7e\x7c\xd9\x54\x17\xaf"
- "\x89\xa6\xb8\x79\x4c\x31\x0c\xf0",
- }, { /* Test Vector #4 */
- .plaintext = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
- "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
- "\x47\x39\x17\xc1\x40\x2b\x80\x09"
- "\x9d\xca\x5c\xbc\x20\x70\x75\xc0"
- "\x27\x54\x77\x61\x73\x20\x62\x72"
- "\x69\x6c\x6c\x69\x67\x2c\x20\x61"
- "\x6e\x64\x20\x74\x68\x65\x20\x73"
- "\x6c\x69\x74\x68\x79\x20\x74\x6f"
- "\x76\x65\x73\x0a\x44\x69\x64\x20"
- "\x67\x79\x72\x65\x20\x61\x6e\x64"
- "\x20\x67\x69\x6d\x62\x6c\x65\x20"
- "\x69\x6e\x20\x74\x68\x65\x20\x77"
- "\x61\x62\x65\x3a\x0a\x41\x6c\x6c"
- "\x20\x6d\x69\x6d\x73\x79\x20\x77"
- "\x65\x72\x65\x20\x74\x68\x65\x20"
- "\x62\x6f\x72\x6f\x67\x6f\x76\x65"
- "\x73\x2c\x0a\x41\x6e\x64\x20\x74"
- "\x68\x65\x20\x6d\x6f\x6d\x65\x20"
- "\x72\x61\x74\x68\x73\x20\x6f\x75"
- "\x74\x67\x72\x61\x62\x65\x2e",
- .psize = 159,
- .digest = "\x45\x41\x66\x9a\x7e\xaa\xee\x61"
- "\xe7\x08\xdc\x7c\xbc\xc5\xeb\x62",
- }, { /* Test Vector #5 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .psize = 48,
- .digest = "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #6 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 48,
- .digest = "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #7 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xf0\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\x11\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 80,
- .digest = "\x05\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #8 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xfb\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
- "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .psize = 80,
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #9 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xfd\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .psize = 48,
- .digest = "\xfa\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- }, { /* Test Vector #10 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xe3\x35\x94\xd7\x50\x5e\x43\xb9"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x33\x94\xd7\x50\x5e\x43\x79\xcd"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 96,
- .digest = "\x14\x00\x00\x00\x00\x00\x00\x00"
- "\x55\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #11 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xe3\x35\x94\xd7\x50\x5e\x43\xb9"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x33\x94\xd7\x50\x5e\x43\x79\xcd"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 80,
- .digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Regression test for overflow in AVX2 implementation */
- .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff",
- .psize = 300,
- .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
- "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
- }
-};
-
/* NHPoly1305 test vectors from https://github.com/google/adiantum */
static const struct hash_testvec nhpoly1305_tv_template[] = {
{
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 19f2b365e140..368018cfa9bf 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -187,7 +187,7 @@ static void __exit twofish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(twofish_mod_init);
+module_init(twofish_mod_init);
module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 07994e5ebf4e..41f13d490333 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1169,7 +1169,7 @@ MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
-subsys_initcall(wp512_mod_init);
+module_init(wp512_mod_init);
module_exit(wp512_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index fc785667b134..6c5f6766fdd6 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -8,9 +8,12 @@
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
@@ -30,22 +33,6 @@ struct xcbc_tfm_ctx {
u8 consts[];
};
-/*
- * +------------------------
- * | <shash desc>
- * +------------------------
- * | xcbc_desc_ctx
- * +------------------------
- * | odds (block size)
- * +------------------------
- * | prev (block size)
- * +------------------------
- */
-struct xcbc_desc_ctx {
- unsigned int len;
- u8 odds[];
-};
-
#define XCBC_BLOCKSIZE 16
static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
@@ -70,13 +57,10 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = &ctx->odds[bs];
+ u8 *prev = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(prev, 0, bs);
-
return 0;
}
@@ -85,77 +69,36 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(odds + ctx->len, p, len);
- ctx->len += len;
- return 0;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- crypto_xor(prev, odds, bs);
- crypto_cipher_encrypt_one(tfm, prev, prev);
+ u8 *prev = shash_desc_ctx(pdesc);
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
+ do {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(odds, p, len);
- ctx->len = len;
- }
-
- return 0;
+ } while (len >= bs);
+ return len;
}
-static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_xcbc_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
unsigned int offset = 0;
- if (ctx->len != bs) {
- unsigned int rlen;
- u8 *p = odds + ctx->len;
-
- *p = 0x80;
- p++;
-
- rlen = bs - ctx->len -1;
- if (rlen)
- memset(p, 0, rlen);
-
+ crypto_xor(prev, src, len);
+ if (len != bs) {
+ prev[len] ^= 0x80;
offset += bs;
}
-
- crypto_xor(prev, odds, bs);
crypto_xor(prev, &tctx->consts[offset], bs);
-
crypto_cipher_encrypt_one(tfm, out, prev);
-
return 0;
}
@@ -216,17 +159,18 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) +
alg->cra_blocksize * 2;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct xcbc_desc_ctx) +
- alg->cra_blocksize * 2;
+ inst->alg.descsize = alg->cra_blocksize;
inst->alg.base.cra_init = xcbc_init_tfm;
inst->alg.base.cra_exit = xcbc_exit_tfm;
inst->alg.init = crypto_xcbc_digest_init;
inst->alg.update = crypto_xcbc_digest_update;
- inst->alg.final = crypto_xcbc_digest_final;
+ inst->alg.finup = crypto_xcbc_digest_finup;
inst->alg.setkey = crypto_xcbc_digest_setkey;
inst->free = shash_free_singlespawn_instance;
@@ -255,7 +199,7 @@ static void __exit crypto_xcbc_module_exit(void)
crypto_unregister_template(&crypto_xcbc_tmpl);
}
-subsys_initcall(crypto_xcbc_module_init);
+module_init(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/xctr.c b/crypto/xctr.c
index 9c536ab6d2e5..607ab82cb19b 100644
--- a/crypto/xctr.c
+++ b/crypto/xctr.c
@@ -182,7 +182,7 @@ static void __exit crypto_xctr_module_exit(void)
crypto_unregister_template(&crypto_xctr_tmpl);
}
-subsys_initcall(crypto_xctr_module_init);
+module_init(crypto_xctr_module_init);
module_exit(crypto_xctr_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/xts.c b/crypto/xts.c
index 31529c9ef08f..3da8f5e053d6 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -363,7 +363,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
- if (err == -ENOENT) {
+ if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
err = -ENAMETOOLONG;
if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -397,7 +397,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
- if (!strncmp(cipher_name, "ecb(", 4)) {
+ if (!memcmp(cipher_name, "ecb(", 4)) {
int len;
len = strscpy(name, cipher_name + 4, sizeof(name));
@@ -466,7 +466,7 @@ static void __exit xts_module_exit(void)
crypto_unregister_template(&xts_tmpl);
}
-subsys_initcall(xts_module_init);
+module_init(xts_module_init);
module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c
index ac206ad4184d..175bb7ae0fcd 100644
--- a/crypto/xxhash_generic.c
+++ b/crypto/xxhash_generic.c
@@ -96,7 +96,7 @@ static void __exit xxhash_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(xxhash_mod_init);
+module_init(xxhash_mod_init);
module_exit(xxhash_mod_fini);
MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 90bb4f36f846..7570e11b4ee6 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -196,7 +196,7 @@ static void __exit zstd_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(zstd_mod_init);
+module_init(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");