Diffstat (limited to 'include/crypto/internal')
-rw-r--r--  include/crypto/internal/acompress.h | 128
-rw-r--r--  include/crypto/internal/blake2b.h   |  92
-rw-r--r--  include/crypto/internal/blockhash.h |  52
-rw-r--r--  include/crypto/internal/chacha.h    |  43
-rw-r--r--  include/crypto/internal/engine.h    |   5
-rw-r--r--  include/crypto/internal/geniv.h     |   1
-rw-r--r--  include/crypto/internal/hash.h      | 117
-rw-r--r--  include/crypto/internal/poly1305.h  |  28
-rw-r--r--  include/crypto/internal/scompress.h |  17
-rw-r--r--  include/crypto/internal/sha2.h      |  66
-rw-r--r--  include/crypto/internal/simd.h      |  10
-rw-r--r--  include/crypto/internal/skcipher.h  |  49
12 files changed, 416 insertions(+), 192 deletions(-)
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index aaf59f3236fa..ffffd88bbbad 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -11,12 +11,17 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/compiler_types.h>
+#include <linux/cpumask_types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
-#define ACOMP_REQUEST_ON_STACK(name, tfm) \
+#define ACOMP_FBREQ_ON_STACK(name, req) \
char __##name##_req[sizeof(struct acomp_req) + \
MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
- struct acomp_req *name = acomp_request_on_stack_init( \
- __##name##_req, (tfm), 0, true)
+ struct acomp_req *name = acomp_fbreq_on_stack_init( \
+ __##name##_req, (req))
/**
* struct acomp_alg - asynchronous compression algorithm
@@ -35,9 +40,7 @@
* counterpart to @init, used to remove various changes set in
* @init.
*
- * @reqsize: Context size for (de)compression requests
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
* @calg: Common algorithm data structure shared with scomp
*/
struct acomp_alg {
@@ -46,14 +49,61 @@ struct acomp_alg {
int (*init)(struct crypto_acomp *tfm);
void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
-
union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
};
+struct crypto_acomp_stream {
+ spinlock_t lock;
+ void *ctx;
+};
+
+struct crypto_acomp_streams {
+ /* These must come first because of struct scomp_alg. */
+ void *(*alloc_ctx)(void);
+ union {
+ void (*free_ctx)(void *);
+ void (*cfree_ctx)(const void *);
+ };
+
+ struct crypto_acomp_stream __percpu *streams;
+ struct work_struct stream_work;
+ cpumask_t stream_want;
+};
+
+struct acomp_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int slen;
+ unsigned int dlen;
+
+ int flags;
+};
+
/*
* Transform internal helpers.
*/
@@ -98,17 +148,10 @@ void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
-static inline bool acomp_request_chained(struct acomp_req *req)
-{
- return crypto_request_chained(&req->base);
-}
-
static inline bool acomp_request_issg(struct acomp_req *req)
{
return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
- CRYPTO_ACOMP_REQ_DST_VIRT |
- CRYPTO_ACOMP_REQ_SRC_FOLIO |
- CRYPTO_ACOMP_REQ_DST_FOLIO));
+ CRYPTO_ACOMP_REQ_DST_VIRT));
}
static inline bool acomp_request_src_isvirt(struct acomp_req *req)
@@ -143,19 +186,62 @@ static inline bool acomp_request_isnondma(struct acomp_req *req)
CRYPTO_ACOMP_REQ_DST_NONDMA);
}
-static inline bool acomp_request_src_isfolio(struct acomp_req *req)
+static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream);
+
+static inline void crypto_acomp_unlock_stream_bh(
+ struct crypto_acomp_stream *stream) __releases(stream)
+{
+ spin_unlock_bh(&stream->lock);
+}
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used);
+void acomp_walk_done_dst(struct acomp_walk *walk, int used);
+int acomp_walk_next_src(struct acomp_walk *walk);
+int acomp_walk_next_dst(struct acomp_walk *walk);
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic);
+
+static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
+{
+ return walk->slen != cur;
+}
+
+static inline u32 acomp_request_flags(struct acomp_req *req)
{
- return req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO;
+ return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
}
-static inline bool acomp_request_dst_isfolio(struct acomp_req *req)
+static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
{
- return req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO;
+ return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
}
-static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm)
+static inline struct acomp_req *acomp_fbreq_on_stack_init(
+ char *buf, struct acomp_req *old)
{
- return crypto_tfm_req_chain(&tfm->base);
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
+ struct acomp_req *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_acomp_tfm(crypto_acomp_fb(tfm)));
+ acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ req->src = old->src;
+ req->dst = old->dst;
+ req->slen = old->slen;
+ req->dlen = old->dlen;
+
+ return req;
}
#endif
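
A rough usage sketch (illustrative, not part of this patch) of how a driver
loop might consume the new acomp_walk API; my_compress_chunk() is a
hypothetical helper and the calling convention is simplified:

static int my_acomp_compress(struct acomp_req *req)
{
	struct acomp_walk walk;
	int ret;

	ret = acomp_walk_virt(&walk, req, true);
	if (ret)
		return ret;

	do {
		int scur = acomp_walk_next_src(&walk);
		int dcur = acomp_walk_next_dst(&walk);

		/* Compress scur source bytes into at most dcur bytes. */
		my_compress_chunk(walk.src.virt.addr, scur,
				  walk.dst.virt.addr, &dcur);

		acomp_walk_done_src(&walk, scur);
		acomp_walk_done_dst(&walk, dcur);
	} while (acomp_walk_more_src(&walk, 0));

	return 0;
}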
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
index 982fe5e8471c..3e09e2485306 100644
--- a/include/crypto/internal/blake2b.h
+++ b/include/crypto/internal/blake2b.h
@@ -7,65 +7,36 @@
#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
#define _CRYPTO_INTERNAL_BLAKE2B_H
+#include <asm/byteorder.h>
#include <crypto/blake2b.h>
#include <crypto/internal/hash.h>
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/math.h>
#include <linux/string.h>
-
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
+#include <linux/types.h>
static inline void blake2b_set_lastblock(struct blake2b_state *state)
{
state->f[0] = -1;
+ state->f[1] = 0;
}
-typedef void (*blake2b_compress_t)(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-static inline void __blake2b_update(struct blake2b_state *state,
- const u8 *in, size_t inlen,
- blake2b_compress_t compress)
+static inline void blake2b_set_nonlast(struct blake2b_state *state)
{
- const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2B_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
- in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
+ state->f[0] = 0;
+ state->f[1] = 0;
}
-static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
- blake2b_compress_t compress)
-{
- int i;
-
- blake2b_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
- for (i = 0; i < ARRAY_SIZE(state->h); i++)
- __cpu_to_le64s(&state->h[i]);
- memcpy(out, state->h, state->outlen);
-}
+typedef void (*blake2b_compress_t)(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
/* Helper functions for shash implementations of BLAKE2b */
struct blake2b_tfm_ctx {
- u8 key[BLAKE2B_KEY_SIZE];
+ u8 key[BLAKE2B_BLOCK_SIZE];
unsigned int keylen;
};
@@ -74,10 +45,13 @@ static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
{
struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
+ if (keylen > BLAKE2B_KEY_SIZE)
return -EINVAL;
+ BUILD_BUG_ON(BLAKE2B_KEY_SIZE > BLAKE2B_BLOCK_SIZE);
+
memcpy(tctx->key, key, keylen);
+ memset(tctx->key + keylen, 0, BLAKE2B_BLOCK_SIZE - keylen);
tctx->keylen = keylen;
return 0;
@@ -89,26 +63,38 @@ static inline int crypto_blake2b_init(struct shash_desc *desc)
struct blake2b_state *state = shash_desc_ctx(desc);
unsigned int outlen = crypto_shash_digestsize(desc->tfm);
- __blake2b_init(state, outlen, tctx->key, tctx->keylen);
- return 0;
+ __blake2b_init(state, outlen, tctx->keylen);
+ return tctx->keylen ?
+ crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0;
}
-static inline int crypto_blake2b_update(struct shash_desc *desc,
- const u8 *in, unsigned int inlen,
- blake2b_compress_t compress)
+static inline int crypto_blake2b_update_bo(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen,
+ blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
- __blake2b_update(state, in, inlen, compress);
- return 0;
+ blake2b_set_nonlast(state);
+ compress(state, in, inlen / BLAKE2B_BLOCK_SIZE, BLAKE2B_BLOCK_SIZE);
+ return inlen - round_down(inlen, BLAKE2B_BLOCK_SIZE);
}
-static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
+static inline int crypto_blake2b_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out,
blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
+ u8 buf[BLAKE2B_BLOCK_SIZE];
+ int i;
- __blake2b_final(state, out, compress);
+ memcpy(buf, in, inlen);
+ memset(buf + inlen, 0, BLAKE2B_BLOCK_SIZE - inlen);
+ blake2b_set_lastblock(state);
+ compress(state, buf, 1, inlen);
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+ memcpy(out, state->h, crypto_shash_digestsize(desc->tfm));
+ memzero_explicit(buf, sizeof(buf));
return 0;
}
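
The helpers above are intended as thin glue for shash drivers; a hedged
sketch, with blake2b_compress_arch standing in for an implementation's
blake2b_compress_t function:

static int my_blake2b_update(struct shash_desc *desc, const u8 *in,
			     unsigned int inlen)
{
	/* Returns the leftover partial-block byte count; the hash API
	 * buffers those bytes when CRYPTO_AHASH_ALG_BLOCK_ONLY is set. */
	return crypto_blake2b_update_bo(desc, in, inlen,
					blake2b_compress_arch);
}

static int my_blake2b_finup(struct shash_desc *desc, const u8 *in,
			    unsigned int inlen, u8 *out)
{
	return crypto_blake2b_finup(desc, in, inlen, out,
				    blake2b_compress_arch);
}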
diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h
new file mode 100644
index 000000000000..52d9d4c82493
--- /dev/null
+++ b/include/crypto/internal/blockhash.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Handle partial blocks for block hash.
+ *
+ * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H
+#define _CRYPTO_INTERNAL_BLOCKHASH_H
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \
+ buf, buflen) \
+ ({ \
+ typeof(block_fn) *_block_fn = &(block_fn); \
+ typeof(state + 0) _state = (state); \
+ unsigned int _buflen = (buflen); \
+ size_t _nbytes = (nbytes); \
+ unsigned int _bs = (bs); \
+ const u8 *_src = (src); \
+ u8 *_buf = (buf); \
+ while ((_buflen + _nbytes) >= _bs) { \
+ const u8 *data = _src; \
+ size_t len = _nbytes; \
+ size_t blocks; \
+ int remain; \
+ if (_buflen) { \
+ remain = _bs - _buflen; \
+ memcpy(_buf + _buflen, _src, remain); \
+ data = _buf; \
+ len = _bs; \
+ } \
+ remain = len % _bs; \
+ blocks = (len - remain) / (dv); \
+ (*_block_fn)(_state, data, blocks); \
+ _src += len - remain - _buflen; \
+ _nbytes -= len - remain - _buflen; \
+ _buflen = 0; \
+ } \
+ memcpy(_buf + _buflen, _src, _nbytes); \
+ _buflen += _nbytes; \
+ })
+
+#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen)
+#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen)
+
+#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */
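
A usage illustration (an assumption, not from this patch): the statement
expression evaluates to the updated buffer length, so a caller that tracks
buflen explicitly assigns the result. The _BLOCKS variant passes a block
count to the transform (the plain variant passes a byte count);
my_transform() is hypothetical:

struct my_hash_ctx {
	u32 state[8];
	u8 buf[64];
	unsigned int buflen;
};

static void my_transform(u32 *state, const u8 *data, size_t nblocks);

static void my_hash_update(struct my_hash_ctx *ctx, const u8 *data,
			   size_t len)
{
	/* Feed whole 64-byte blocks to my_transform(), buffer the rest. */
	ctx->buflen = BLOCK_HASH_UPDATE_BLOCKS(my_transform, ctx->state,
					       data, len, sizeof(ctx->buf),
					       ctx->buf, ctx->buflen);
}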
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
deleted file mode 100644
index b085dc1ac151..000000000000
--- a/include/crypto/internal/chacha.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _CRYPTO_INTERNAL_CHACHA_H
-#define _CRYPTO_INTERNAL_CHACHA_H
-
-#include <crypto/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
-
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize, int nrounds)
-{
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- ctx->nrounds = nrounds;
- return 0;
-}
-
-static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 20);
-}
-
-static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 12);
-}
-
-#endif /* _CRYPTO_CHACHA_H */
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index fbf4be56cf12..b6a4ea2240fc 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -27,10 +27,10 @@ struct device;
* @retry_support: indication that the hardware allows re-execution
* of a failed backlog request, which is requeued to the
* crypto-engine in head position to keep order
+ * @rt: whether this queue is set to run as a realtime task
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
* @prepare_crypt_hardware: a request will soon arrive from the queue
* so the subsystem requests the driver to prepare the hardware
* by issuing this call
@@ -51,14 +51,13 @@ struct crypto_engine {
bool running;
bool retry_support;
+ bool rt;
struct list_head list;
spinlock_t queue_lock;
struct crypto_queue queue;
struct device *dev;
- bool rt;
-
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
int (*do_batch_requests)(struct crypto_engine *engine);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 7fd7126f593a..012f5fb22d43 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,7 +15,6 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 052ac7924af3..0f85c543f80b 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -11,6 +11,24 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
+/* Set this bit to handle partial blocks in the API. */
+#define CRYPTO_AHASH_ALG_BLOCK_ONLY 0x01000000
+
+/* Set this bit if final requires at least one byte. */
+#define CRYPTO_AHASH_ALG_FINAL_NONZERO 0x02000000
+
+/* Set this bit if finup can deal with multiple blocks. */
+#define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000
+
+/* This bit is set by the Crypto API if export_core is not supported. */
+#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000
+
+#define HASH_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = ahash_fbreq_on_stack_init( \
+ __##name##_req, (req))
+
struct ahash_request;
struct ahash_instance {
@@ -49,6 +67,7 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
+void ahash_free_singlespawn_instance(struct ahash_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -58,12 +77,20 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{
return crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
+static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
+{
+ return crypto_hash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -187,7 +214,7 @@ static inline void ahash_request_complete(struct ahash_request *req, int err)
static inline u32 ahash_request_flags(struct ahash_request *req)
{
- return req->base.flags;
+ return crypto_request_flags(&req->base) & ~CRYPTO_AHASH_REQ_PRIVATE;
}
static inline struct crypto_ahash *crypto_spawn_ahash(
@@ -247,20 +274,96 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_shash, base);
}
-static inline bool ahash_request_chained(struct ahash_request *req)
+static inline bool ahash_request_isvirt(struct ahash_request *req)
{
- return false;
+ return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
}
-static inline bool ahash_request_isvirt(struct ahash_request *req)
+static inline bool crypto_ahash_req_virt(struct crypto_ahash *tfm)
{
- return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
+ return crypto_tfm_req_virt(&tfm->base);
}
-static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm)
+static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm)
{
- return crypto_tfm_req_chain(&tfm->base);
+ return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb);
}
+static inline struct ahash_request *ahash_fbreq_on_stack_init(
+ char *buf, struct ahash_request *old)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(old);
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_ahash_tfm(crypto_ahash_fb(tfm)));
+ ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ req->src = old->src;
+ req->result = old->result;
+ req->nbytes = old->nbytes;
+
+ return req;
+}
+
+/* Return the state size without partial block for block-only algorithms. */
+static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm)
+{
+ return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1;
+}
+
+/* This can only be used if the request was never cloned. */
+#define HASH_REQUEST_ZERO(name) \
+ memzero_explicit(__##name##_req, sizeof(__##name##_req))
+
+/**
+ * crypto_ahash_export_core() - extract core state for message digest
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_ahash_export_core(struct ahash_request *req, void *out);
+
+/**
+ * crypto_ahash_import_core() - import core state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_ahash_import_core(struct ahash_request *req, const void *in);
+
+/**
+ * crypto_shash_export_core() - extract core state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_shash_export_core(struct shash_desc *desc, void *out);
+
+/**
+ * crypto_shash_import_core() - import core state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_shash_import_core(struct shash_desc *desc, const void *in);
+
#endif /* _CRYPTO_INTERNAL_HASH_H */
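
A hedged sketch of the intended fallback pattern with the new on-stack
request helper; my_hw_busy() and my_hw_digest() are hypothetical:

static int my_ahash_digest(struct ahash_request *req)
{
	if (my_hw_busy()) {
		/* Run the request on the software fallback transform. */
		HASH_FBREQ_ON_STACK(fbreq, req);

		return crypto_ahash_digest(fbreq);
	}
	return my_hw_digest(req);
}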
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index e614594f88c1..c60315f47562 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -6,9 +6,8 @@
#ifndef _CRYPTO_INTERNAL_POLY1305_H
#define _CRYPTO_INTERNAL_POLY1305_H
-#include <linux/unaligned.h>
-#include <linux/types.h>
#include <crypto/poly1305.h>
+#include <linux/types.h>
/*
* Poly1305 core functions. These only accept whole blocks; the caller must
@@ -31,4 +30,29 @@ void poly1305_core_blocks(struct poly1305_state *state,
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst);
+void poly1305_block_init_arch(struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+void poly1305_block_init_generic(struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
+ unsigned int len, u32 padbit);
+
+static inline void poly1305_blocks_generic(struct poly1305_block_state *state,
+ const u8 *src, unsigned int len,
+ u32 padbit)
+{
+ poly1305_core_blocks(&state->h, &state->core_r, src,
+ len / POLY1305_BLOCK_SIZE, padbit);
+}
+
+void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE], const u32 nonce[4]);
+
+static inline void poly1305_emit_generic(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4])
+{
+ poly1305_core_emit(state, nonce, digest);
+}
+
#endif
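
A minimal sketch (assuming len is a whole number of blocks and the key and
nonce have already been split) of the generic block/emit flow declared
above; my_poly1305() is illustrative only:

static void my_poly1305(const u8 raw_key[POLY1305_BLOCK_SIZE],
			const u32 nonce[4], const u8 *src,
			unsigned int len, u8 mac[POLY1305_DIGEST_SIZE])
{
	struct poly1305_block_state state;

	poly1305_block_init_generic(&state, raw_key);
	poly1305_blocks_generic(&state, src, len, 1);	/* padbit = 1 */
	poly1305_emit_generic(&state.h, mac, nonce);
}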
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index f25aa2ea3b48..533d6c16a491 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -9,10 +9,7 @@
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <crypto/acompress.h>
-#include <crypto/algapi.h>
-
-struct acomp_req;
+#include <crypto/internal/acompress.h>
struct crypto_scomp {
struct crypto_tfm base;
@@ -26,12 +23,10 @@ struct crypto_scomp {
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
+ * @streams: Per-cpu memory for algorithm
* @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
- void *(*alloc_ctx)(void);
- void (*free_ctx)(void *ctx);
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
@@ -40,6 +35,14 @@ struct scomp_alg {
void *ctx);
union {
+ struct {
+ void *(*alloc_ctx)(void);
+ void (*free_ctx)(void *ctx);
+ };
+ struct crypto_acomp_streams streams;
+ };
+
+ union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
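
A hedged sketch of an scomp_alg using the per-context hooks that now sit in
a union with crypto_acomp_streams; the my_* callbacks are hypothetical:

static struct scomp_alg my_scomp = {
	.compress	= my_compress,
	.decompress	= my_decompress,
	.alloc_ctx	= my_alloc_ctx,
	.free_ctx	= my_free_ctx,
	.base		= {
		.cra_name	 = "my-alg",
		.cra_driver_name = "my-alg-generic",
		.cra_module	 = THIS_MODULE,
	},
};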
diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h
new file mode 100644
index 000000000000..b9bccd3ff57f
--- /dev/null
+++ b/include/crypto/internal/sha2.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _CRYPTO_INTERNAL_SHA2_H
+#define _CRYPTO_INTERNAL_SHA2_H
+
+#include <crypto/internal/simd.h>
+#include <crypto/sha2.h>
+#include <linux/compiler_attributes.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256)
+bool sha256_is_arch_optimized(void);
+#else
+static inline bool sha256_is_arch_optimized(void)
+{
+ return false;
+}
+#endif
+void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+static inline void sha256_choose_blocks(
+ u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
+ bool force_generic, bool force_simd)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
+ sha256_blocks_generic(state, data, nblocks);
+ else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
+ (force_simd || crypto_simd_usable()))
+ sha256_blocks_simd(state, data, nblocks);
+ else
+ sha256_blocks_arch(state, data, nblocks);
+}
+
+static __always_inline void sha256_finup(
+ struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE],
+ size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size,
+ bool force_generic, bool force_simd)
+{
+ const size_t bit_offset = SHA256_BLOCK_SIZE - 8;
+ __be64 *bits = (__be64 *)&buf[bit_offset];
+ int i;
+
+ buf[len++] = 0x80;
+ if (len > bit_offset) {
+ memset(&buf[len], 0, SHA256_BLOCK_SIZE - len);
+ sha256_choose_blocks(sctx->state, buf, 1, force_generic,
+ force_simd);
+ len = 0;
+ }
+
+ memset(&buf[len], 0, bit_offset - len);
+ *bits = cpu_to_be64(sctx->count << 3);
+ sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);
+
+ for (i = 0; i < digest_size; i += 4)
+ put_unaligned_be32(sctx->state[i / 4], out + i);
+}
+
+#endif /* _CRYPTO_INTERNAL_SHA2_H */
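
A usage sketch derived from the code above: finishing a digest once
sctx->count is up to date and buf holds the len-byte partial block
(len < SHA256_BLOCK_SIZE), forcing neither the generic nor the SIMD path:

static void my_sha256_final(struct crypto_sha256_state *sctx,
			    u8 buf[SHA256_BLOCK_SIZE], size_t len,
			    u8 out[SHA256_DIGEST_SIZE])
{
	sha256_finup(sctx, buf, len, out, SHA256_DIGEST_SIZE,
		     false, false);
}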
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index be97b97a75dd..7e7f1ac3b7fd 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,7 @@
#ifndef _CRYPTO_INTERNAL_SIMD_H
#define _CRYPTO_INTERNAL_SIMD_H
+#include <asm/simd.h>
#include <linux/percpu.h>
#include <linux/types.h>
@@ -43,14 +44,9 @@ void simd_unregister_aeads(struct aead_alg *algs, int count,
*
* This delegates to may_use_simd(), except that this also returns false if SIMD
* in crypto code has been temporarily disabled on this CPU by the crypto
- * self-tests, in order to test the no-SIMD fallback code. This override is
- * currently limited to configurations where the extra self-tests are enabled,
- * because it might be a bit too invasive to be part of the regular self-tests.
- *
- * This is a macro so that <asm/simd.h>, which some architectures don't have,
- * doesn't have to be included directly here.
+ * self-tests, in order to test the no-SIMD fallback code.
*/
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+#ifdef CONFIG_CRYPTO_SELFTESTS
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
#define crypto_simd_usable() \
(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
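
The usual guard pattern, sketched under the assumption of an x86 caller
(other architectures use their own kernel-mode SIMD begin/end calls);
my_hash_blocks_simd() and my_hash_blocks_generic() are hypothetical:

static void my_hash_blocks(u32 *state, const u8 *data, size_t nblocks)
{
	if (crypto_simd_usable()) {
		kernel_fpu_begin();	/* <asm/fpu/api.h> on x86 */
		my_hash_blocks_simd(state, data, nblocks);
		kernel_fpu_end();
	} else {
		my_hash_blocks_generic(state, data, nblocks);
	}
}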
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index a958ab0636ad..d5aa535263f6 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
+#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/types.h>
@@ -54,48 +55,6 @@ struct crypto_lskcipher_spawn {
struct crypto_spawn base;
};
-struct skcipher_walk {
- union {
- /* Virtual address of the source. */
- struct {
- struct {
- const void *const addr;
- } virt;
- } src;
-
- /* Private field for the API, do not use. */
- struct scatter_walk in;
- };
-
- unsigned int nbytes;
-
- union {
- /* Virtual address of the destination. */
- struct {
- struct {
- void *const addr;
- } virt;
- } dst;
-
- /* Private field for the API, do not use. */
- struct scatter_walk out;
- };
-
- unsigned int total;
-
- u8 *page;
- u8 *buffer;
- u8 *oiv;
- void *iv;
-
- unsigned int ivsize;
-
- int flags;
- unsigned int blocksize;
- unsigned int stride;
- unsigned int alignmask;
-};
-
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
@@ -212,7 +171,6 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
-int skcipher_walk_done(struct skcipher_walk *walk, int res);
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req,
bool atomic);
@@ -223,11 +181,6 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
struct aead_request *__restrict req,
bool atomic);
-static inline void skcipher_walk_abort(struct skcipher_walk *walk)
-{
- skcipher_walk_done(walk, -ECANCELED);
-}
-
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);