Diffstat (limited to 'arch/riscv/crypto')
-rw-r--r--  arch/riscv/crypto/Kconfig                                   23
-rw-r--r--  arch/riscv/crypto/Makefile                                   6
-rw-r--r--  arch/riscv/crypto/chacha-riscv64-glue.c                    101
-rw-r--r--  arch/riscv/crypto/chacha-riscv64-zvkb.S                    294
-rw-r--r--  arch/riscv/crypto/ghash-riscv64-glue.c                      58
-rw-r--r--  arch/riscv/crypto/sha256-riscv64-glue.c                    137
-rw-r--r--  arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S   225
-rw-r--r--  arch/riscv/crypto/sha512-riscv64-glue.c                     45
-rw-r--r--  arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S               4
-rw-r--r--  arch/riscv/crypto/sm3-riscv64-glue.c                        47
-rw-r--r--  arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S                   4
11 files changed, 57 insertions(+), 887 deletions(-)
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
index c67095a3d669..cd9b776602f8 100644
--- a/arch/riscv/crypto/Kconfig
+++ b/arch/riscv/crypto/Kconfig
@@ -18,16 +18,6 @@ config CRYPTO_AES_RISCV64
    - Zvkb vector crypto extension (CTR)
    - Zvkg vector crypto extension (XTS)
-config CRYPTO_CHACHA_RISCV64
- tristate "Ciphers: ChaCha"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
- select CRYPTO_SKCIPHER
- help
- Length-preserving ciphers: ChaCha20 stream cipher algorithm
-
- Architecture: riscv64 using:
- - Zvkb vector crypto extension
-
config CRYPTO_GHASH_RISCV64
tristate "Hash functions: GHASH"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
@@ -38,17 +28,6 @@ config CRYPTO_GHASH_RISCV64
Architecture: riscv64 using:
    - Zvkg vector crypto extension
-config CRYPTO_SHA256_RISCV64
- tristate "Hash functions: SHA-224 and SHA-256"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
- select CRYPTO_SHA256
- help
- SHA-224 and SHA-256 secure hash algorithm (FIPS 180)
-
- Architecture: riscv64 using:
- - Zvknha or Zvknhb vector crypto extensions
- - Zvkb vector crypto extension
-
config CRYPTO_SHA512_RISCV64
tristate "Hash functions: SHA-384 and SHA-512"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
@@ -64,7 +43,7 @@ config CRYPTO_SM3_RISCV64
tristate "Hash functions: SM3 (ShangMi 3)"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
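
The select change above tracks the new fallback path: the SM3 glue below now calls sm3_block_generic() from the library code enabled by CRYPTO_LIB_SM3, instead of layering on the generic shash that CRYPTO_SM3 provided.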
diff --git a/arch/riscv/crypto/Makefile b/arch/riscv/crypto/Makefile
index 247c7bc7288c..e10e8257734e 100644
--- a/arch/riscv/crypto/Makefile
+++ b/arch/riscv/crypto/Makefile
@@ -4,15 +4,9 @@ obj-$(CONFIG_CRYPTO_AES_RISCV64) += aes-riscv64.o
aes-riscv64-y := aes-riscv64-glue.o aes-riscv64-zvkned.o \
aes-riscv64-zvkned-zvbb-zvkg.o aes-riscv64-zvkned-zvkb.o
-obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o
-chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o
-
obj-$(CONFIG_CRYPTO_GHASH_RISCV64) += ghash-riscv64.o
ghash-riscv64-y := ghash-riscv64-glue.o ghash-riscv64-zvkg.o
-obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
-sha256-riscv64-y := sha256-riscv64-glue.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
-
obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o
sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o
diff --git a/arch/riscv/crypto/chacha-riscv64-glue.c b/arch/riscv/crypto/chacha-riscv64-glue.c
deleted file mode 100644
index 10b46f36375a..000000000000
--- a/arch/riscv/crypto/chacha-riscv64-glue.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ChaCha20 using the RISC-V vector crypto extensions
- *
- * Copyright (C) 2023 SiFive, Inc.
- * Author: Jerry Shih <jerry.shih@sifive.com>
- */
-
-#include <asm/simd.h>
-#include <asm/vector.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/linkage.h>
-#include <linux/module.h>
-
-asmlinkage void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out,
- size_t len, const u32 iv[4]);
-
-static int riscv64_chacha20_crypt(struct skcipher_request *req)
-{
- u32 iv[CHACHA_IV_SIZE / sizeof(u32)];
- u8 block_buffer[CHACHA_BLOCK_SIZE];
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- unsigned int tail_bytes;
- int err;
-
- iv[0] = get_unaligned_le32(req->iv);
- iv[1] = get_unaligned_le32(req->iv + 4);
- iv[2] = get_unaligned_le32(req->iv + 8);
- iv[3] = get_unaligned_le32(req->iv + 12);
-
- err = skcipher_walk_virt(&walk, req, false);
- while (walk.nbytes) {
- nbytes = walk.nbytes & ~(CHACHA_BLOCK_SIZE - 1);
- tail_bytes = walk.nbytes & (CHACHA_BLOCK_SIZE - 1);
- kernel_vector_begin();
- if (nbytes) {
- chacha20_zvkb(ctx->key, walk.src.virt.addr,
- walk.dst.virt.addr, nbytes, iv);
- iv[0] += nbytes / CHACHA_BLOCK_SIZE;
- }
- if (walk.nbytes == walk.total && tail_bytes > 0) {
- memcpy(block_buffer, walk.src.virt.addr + nbytes,
- tail_bytes);
- chacha20_zvkb(ctx->key, block_buffer, block_buffer,
- CHACHA_BLOCK_SIZE, iv);
- memcpy(walk.dst.virt.addr + nbytes, block_buffer,
- tail_bytes);
- tail_bytes = 0;
- }
- kernel_vector_end();
-
- err = skcipher_walk_done(&walk, tail_bytes);
- }
-
- return err;
-}
-
-static struct skcipher_alg riscv64_chacha_alg = {
- .setkey = chacha20_setkey,
- .encrypt = riscv64_chacha20_crypt,
- .decrypt = riscv64_chacha20_crypt,
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .base = {
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct chacha_ctx),
- .cra_priority = 300,
- .cra_name = "chacha20",
- .cra_driver_name = "chacha20-riscv64-zvkb",
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init riscv64_chacha_mod_init(void)
-{
- if (riscv_isa_extension_available(NULL, ZVKB) &&
- riscv_vector_vlen() >= 128)
- return crypto_register_skcipher(&riscv64_chacha_alg);
-
- return -ENODEV;
-}
-
-static void __exit riscv64_chacha_mod_exit(void)
-{
- crypto_unregister_skcipher(&riscv64_chacha_alg);
-}
-
-module_init(riscv64_chacha_mod_init);
-module_exit(riscv64_chacha_mod_exit);
-
-MODULE_DESCRIPTION("ChaCha20 (RISC-V accelerated)");
-MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("chacha20");
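
The deleted glue code above illustrates the pattern for driving a whole-blocks-only assembly routine: crypt as many full 64-byte blocks as possible directly, then bounce a trailing partial block through a stack buffer. A minimal standalone C sketch of that pattern, with the skcipher walk reduced to a flat buffer and chacha20_blocks() as a hypothetical stand-in for chacha20_zvkb():

#include <stdint.h>
#include <string.h>

#define CHACHA_BLOCK_SIZE 64

/* Hypothetical whole-blocks-only core; len must be a multiple of 64. */
void chacha20_blocks(const uint32_t key[8], const uint8_t *in, uint8_t *out,
		     size_t len, const uint32_t iv[4]);

static void chacha20_crypt(const uint32_t key[8], const uint8_t *in,
			   uint8_t *out, size_t len, uint32_t iv[4])
{
	size_t full = len & ~(size_t)(CHACHA_BLOCK_SIZE - 1);
	size_t tail = len & (CHACHA_BLOCK_SIZE - 1);

	if (full) {
		chacha20_blocks(key, in, out, full, iv);
		iv[0] += full / CHACHA_BLOCK_SIZE;	/* advance the counter */
	}
	if (tail) {
		uint8_t buf[CHACHA_BLOCK_SIZE];

		memcpy(buf, in + full, tail);	/* pad the tail to one block */
		chacha20_blocks(key, buf, buf, CHACHA_BLOCK_SIZE, iv);
		memcpy(out + full, buf, tail);	/* keep only the tail bytes */
	}
}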
diff --git a/arch/riscv/crypto/chacha-riscv64-zvkb.S b/arch/riscv/crypto/chacha-riscv64-zvkb.S
deleted file mode 100644
index bf057737ac69..000000000000
--- a/arch/riscv/crypto/chacha-riscv64-zvkb.S
+++ /dev/null
@@ -1,294 +0,0 @@
-/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
-//
-// This file is dual-licensed, meaning that you can use it under your
-// choice of either of the following two licenses:
-//
-// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the Apache License 2.0 (the "License"). You can obtain
-// a copy in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-//
-// or
-//
-// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
-// Copyright 2024 Google LLC
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The generated code of this file depends on the following RISC-V extensions:
-// - RV64I
-// - RISC-V Vector ('V') with VLEN >= 128
-// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-
-#include <linux/linkage.h>
-
-.text
-.option arch, +zvkb
-
-#define KEYP a0
-#define INP a1
-#define OUTP a2
-#define LEN a3
-#define IVP a4
-
-#define CONSTS0 a5
-#define CONSTS1 a6
-#define CONSTS2 a7
-#define CONSTS3 t0
-#define TMP t1
-#define VL t2
-#define STRIDE t3
-#define NROUNDS t4
-#define KEY0 s0
-#define KEY1 s1
-#define KEY2 s2
-#define KEY3 s3
-#define KEY4 s4
-#define KEY5 s5
-#define KEY6 s6
-#define KEY7 s7
-#define COUNTER s8
-#define NONCE0 s9
-#define NONCE1 s10
-#define NONCE2 s11
-
-.macro chacha_round a0, b0, c0, d0, a1, b1, c1, d1, \
- a2, b2, c2, d2, a3, b3, c3, d3
- // a += b; d ^= a; d = rol(d, 16);
- vadd.vv \a0, \a0, \b0
- vadd.vv \a1, \a1, \b1
- vadd.vv \a2, \a2, \b2
- vadd.vv \a3, \a3, \b3
- vxor.vv \d0, \d0, \a0
- vxor.vv \d1, \d1, \a1
- vxor.vv \d2, \d2, \a2
- vxor.vv \d3, \d3, \a3
- vror.vi \d0, \d0, 32 - 16
- vror.vi \d1, \d1, 32 - 16
- vror.vi \d2, \d2, 32 - 16
- vror.vi \d3, \d3, 32 - 16
-
- // c += d; b ^= c; b = rol(b, 12);
- vadd.vv \c0, \c0, \d0
- vadd.vv \c1, \c1, \d1
- vadd.vv \c2, \c2, \d2
- vadd.vv \c3, \c3, \d3
- vxor.vv \b0, \b0, \c0
- vxor.vv \b1, \b1, \c1
- vxor.vv \b2, \b2, \c2
- vxor.vv \b3, \b3, \c3
- vror.vi \b0, \b0, 32 - 12
- vror.vi \b1, \b1, 32 - 12
- vror.vi \b2, \b2, 32 - 12
- vror.vi \b3, \b3, 32 - 12
-
- // a += b; d ^= a; d = rol(d, 8);
- vadd.vv \a0, \a0, \b0
- vadd.vv \a1, \a1, \b1
- vadd.vv \a2, \a2, \b2
- vadd.vv \a3, \a3, \b3
- vxor.vv \d0, \d0, \a0
- vxor.vv \d1, \d1, \a1
- vxor.vv \d2, \d2, \a2
- vxor.vv \d3, \d3, \a3
- vror.vi \d0, \d0, 32 - 8
- vror.vi \d1, \d1, 32 - 8
- vror.vi \d2, \d2, 32 - 8
- vror.vi \d3, \d3, 32 - 8
-
- // c += d; b ^= c; b = rol(b, 7);
- vadd.vv \c0, \c0, \d0
- vadd.vv \c1, \c1, \d1
- vadd.vv \c2, \c2, \d2
- vadd.vv \c3, \c3, \d3
- vxor.vv \b0, \b0, \c0
- vxor.vv \b1, \b1, \c1
- vxor.vv \b2, \b2, \c2
- vxor.vv \b3, \b3, \c3
- vror.vi \b0, \b0, 32 - 7
- vror.vi \b1, \b1, 32 - 7
- vror.vi \b2, \b2, 32 - 7
- vror.vi \b3, \b3, 32 - 7
-.endm
-
-// void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out, size_t len,
-// const u32 iv[4]);
-//
-// |len| must be nonzero and a multiple of 64 (CHACHA_BLOCK_SIZE).
-// The counter is treated as 32-bit, following the RFC7539 convention.
-SYM_FUNC_START(chacha20_zvkb)
- srli LEN, LEN, 6 // Bytes to blocks
-
- addi sp, sp, -96
- sd s0, 0(sp)
- sd s1, 8(sp)
- sd s2, 16(sp)
- sd s3, 24(sp)
- sd s4, 32(sp)
- sd s5, 40(sp)
- sd s6, 48(sp)
- sd s7, 56(sp)
- sd s8, 64(sp)
- sd s9, 72(sp)
- sd s10, 80(sp)
- sd s11, 88(sp)
-
- li STRIDE, 64
-
- // Set up the initial state matrix in scalar registers.
- li CONSTS0, 0x61707865 // "expa" little endian
- li CONSTS1, 0x3320646e // "nd 3" little endian
- li CONSTS2, 0x79622d32 // "2-by" little endian
- li CONSTS3, 0x6b206574 // "te k" little endian
- lw KEY0, 0(KEYP)
- lw KEY1, 4(KEYP)
- lw KEY2, 8(KEYP)
- lw KEY3, 12(KEYP)
- lw KEY4, 16(KEYP)
- lw KEY5, 20(KEYP)
- lw KEY6, 24(KEYP)
- lw KEY7, 28(KEYP)
- lw COUNTER, 0(IVP)
- lw NONCE0, 4(IVP)
- lw NONCE1, 8(IVP)
- lw NONCE2, 12(IVP)
-
-.Lblock_loop:
- // Set vl to the number of blocks to process in this iteration.
- vsetvli VL, LEN, e32, m1, ta, ma
-
- // Set up the initial state matrix for the next VL blocks in v0-v15.
- // v{i} holds the i'th 32-bit word of the state matrix for all blocks.
- // Note that only the counter word, at index 12, differs across blocks.
- vmv.v.x v0, CONSTS0
- vmv.v.x v1, CONSTS1
- vmv.v.x v2, CONSTS2
- vmv.v.x v3, CONSTS3
- vmv.v.x v4, KEY0
- vmv.v.x v5, KEY1
- vmv.v.x v6, KEY2
- vmv.v.x v7, KEY3
- vmv.v.x v8, KEY4
- vmv.v.x v9, KEY5
- vmv.v.x v10, KEY6
- vmv.v.x v11, KEY7
- vid.v v12
- vadd.vx v12, v12, COUNTER
- vmv.v.x v13, NONCE0
- vmv.v.x v14, NONCE1
- vmv.v.x v15, NONCE2
-
- // Load the first half of the input data for each block into v16-v23.
- // v{16+i} holds the i'th 32-bit word for all blocks.
- vlsseg8e32.v v16, (INP), STRIDE
-
- li NROUNDS, 20
-.Lnext_doubleround:
- addi NROUNDS, NROUNDS, -2
- // column round
- chacha_round v0, v4, v8, v12, v1, v5, v9, v13, \
- v2, v6, v10, v14, v3, v7, v11, v15
- // diagonal round
- chacha_round v0, v5, v10, v15, v1, v6, v11, v12, \
- v2, v7, v8, v13, v3, v4, v9, v14
- bnez NROUNDS, .Lnext_doubleround
-
- // Load the second half of the input data for each block into v24-v31.
- // v{24+i} holds the {8+i}'th 32-bit word for all blocks.
- addi TMP, INP, 32
- vlsseg8e32.v v24, (TMP), STRIDE
-
- // Finalize the first half of the keystream for each block.
- vadd.vx v0, v0, CONSTS0
- vadd.vx v1, v1, CONSTS1
- vadd.vx v2, v2, CONSTS2
- vadd.vx v3, v3, CONSTS3
- vadd.vx v4, v4, KEY0
- vadd.vx v5, v5, KEY1
- vadd.vx v6, v6, KEY2
- vadd.vx v7, v7, KEY3
-
- // Encrypt/decrypt the first half of the data for each block.
- vxor.vv v16, v16, v0
- vxor.vv v17, v17, v1
- vxor.vv v18, v18, v2
- vxor.vv v19, v19, v3
- vxor.vv v20, v20, v4
- vxor.vv v21, v21, v5
- vxor.vv v22, v22, v6
- vxor.vv v23, v23, v7
-
- // Store the first half of the output data for each block.
- vssseg8e32.v v16, (OUTP), STRIDE
-
- // Finalize the second half of the keystream for each block.
- vadd.vx v8, v8, KEY4
- vadd.vx v9, v9, KEY5
- vadd.vx v10, v10, KEY6
- vadd.vx v11, v11, KEY7
- vid.v v0
- vadd.vx v12, v12, COUNTER
- vadd.vx v13, v13, NONCE0
- vadd.vx v14, v14, NONCE1
- vadd.vx v15, v15, NONCE2
- vadd.vv v12, v12, v0
-
- // Encrypt/decrypt the second half of the data for each block.
- vxor.vv v24, v24, v8
- vxor.vv v25, v25, v9
- vxor.vv v26, v26, v10
- vxor.vv v27, v27, v11
- vxor.vv v29, v29, v13
- vxor.vv v28, v28, v12
- vxor.vv v30, v30, v14
- vxor.vv v31, v31, v15
-
- // Store the second half of the output data for each block.
- addi TMP, OUTP, 32
- vssseg8e32.v v24, (TMP), STRIDE
-
- // Update the counter, the remaining number of blocks, and the input and
- // output pointers according to the number of blocks processed (VL).
- add COUNTER, COUNTER, VL
- sub LEN, LEN, VL
- slli TMP, VL, 6
- add OUTP, OUTP, TMP
- add INP, INP, TMP
- bnez LEN, .Lblock_loop
-
- ld s0, 0(sp)
- ld s1, 8(sp)
- ld s2, 16(sp)
- ld s3, 24(sp)
- ld s4, 32(sp)
- ld s5, 40(sp)
- ld s6, 48(sp)
- ld s7, 56(sp)
- ld s8, 64(sp)
- ld s9, 72(sp)
- ld s10, 80(sp)
- ld s11, 88(sp)
- addi sp, sp, 96
- ret
-SYM_FUNC_END(chacha20_zvkb)
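
Each lane of the chacha_round macro in the deleted file executes the scalar ChaCha quarter-round from RFC 7539; the vror.vi rotate-rights by (32 - n) implement rotate-left by n. A reference C sketch of the per-lane operation and of one double round, mirroring the macro's column/diagonal call sites:

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/* One ChaCha quarter-round (RFC 7539) on the 4x4 word state. */
static void chacha_qr(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] ^= x[a]; x[d] = rol32(x[d], 16);
	x[c] += x[d]; x[b] ^= x[c]; x[b] = rol32(x[b], 12);
	x[a] += x[b]; x[d] ^= x[a]; x[d] = rol32(x[d], 8);
	x[c] += x[d]; x[b] ^= x[c]; x[b] = rol32(x[b], 7);
}

static void chacha_doubleround(uint32_t x[16])
{
	chacha_qr(x, 0, 4,  8, 12);	/* column round */
	chacha_qr(x, 1, 5,  9, 13);
	chacha_qr(x, 2, 6, 10, 14);
	chacha_qr(x, 3, 7, 11, 15);
	chacha_qr(x, 0, 5, 10, 15);	/* diagonal round */
	chacha_qr(x, 1, 6, 11, 12);
	chacha_qr(x, 2, 7,  8, 13);
	chacha_qr(x, 3, 4,  9, 14);
}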
diff --git a/arch/riscv/crypto/ghash-riscv64-glue.c b/arch/riscv/crypto/ghash-riscv64-glue.c
index 312e7891fd0a..d86073d25387 100644
--- a/arch/riscv/crypto/ghash-riscv64-glue.c
+++ b/arch/riscv/crypto/ghash-riscv64-glue.c
@@ -11,11 +11,16 @@
#include <asm/simd.h>
#include <asm/vector.h>
+#include <crypto/b128ops.h>
+#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <linux/linkage.h>
+#include <crypto/utils.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
asmlinkage void ghash_zvkg(be128 *accumulator, const be128 *key, const u8 *data,
size_t len);
@@ -26,8 +31,6 @@ struct riscv64_ghash_tfm_ctx {
struct riscv64_ghash_desc_ctx {
be128 accumulator;
- u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
};
static int riscv64_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -78,50 +81,24 @@ static int riscv64_ghash_update(struct shash_desc *desc, const u8 *src,
{
const struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int len;
-
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_BLOCK_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src, srclen);
- dctx->bytes += srclen;
- return 0;
- }
- memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_BLOCK_SIZE - dctx->bytes);
- riscv64_ghash_blocks(tctx, dctx, dctx->buffer,
- GHASH_BLOCK_SIZE);
- src += GHASH_BLOCK_SIZE - dctx->bytes;
- srclen -= GHASH_BLOCK_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
-
- len = round_down(srclen, GHASH_BLOCK_SIZE);
- if (len) {
- riscv64_ghash_blocks(tctx, dctx, src, len);
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
- }
-
- return 0;
+ riscv64_ghash_blocks(tctx, dctx, src,
+ round_down(srclen, GHASH_BLOCK_SIZE));
+ return srclen - round_down(srclen, GHASH_BLOCK_SIZE);
}
-static int riscv64_ghash_final(struct shash_desc *desc, u8 *out)
+static int riscv64_ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
const struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- int i;
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_BLOCK_SIZE; i++)
- dctx->buffer[i] = 0;
+ if (len) {
+ u8 buf[GHASH_BLOCK_SIZE] = {};
- riscv64_ghash_blocks(tctx, dctx, dctx->buffer,
- GHASH_BLOCK_SIZE);
+ memcpy(buf, src, len);
+ riscv64_ghash_blocks(tctx, dctx, buf, GHASH_BLOCK_SIZE);
+ memzero_explicit(buf, sizeof(buf));
}
memcpy(out, &dctx->accumulator, GHASH_DIGEST_SIZE);
@@ -131,7 +108,7 @@ static int riscv64_ghash_final(struct shash_desc *desc, u8 *out)
static struct shash_alg riscv64_ghash_alg = {
.init = riscv64_ghash_init,
.update = riscv64_ghash_update,
- .final = riscv64_ghash_final,
+ .finup = riscv64_ghash_finup,
.setkey = riscv64_ghash_setkey,
.descsize = sizeof(struct riscv64_ghash_desc_ctx),
.digestsize = GHASH_DIGEST_SIZE,
@@ -139,6 +116,7 @@ static struct shash_alg riscv64_ghash_alg = {
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct riscv64_ghash_tfm_ctx),
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_name = "ghash",
.cra_driver_name = "ghash-riscv64-zvkg",
.cra_module = THIS_MODULE,
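
The rewritten update/finup pair works because CRYPTO_AHASH_ALG_BLOCK_ONLY moves partial-block buffering out of the driver and into the API core: update() consumes only whole blocks and reports the leftover byte count, and the core later hands the buffered tail to finup(). A sketch of the buffering this delegates, assuming a hypothetical drv_update() hook with the same contract as riscv64_ghash_update() (it processes round_down(len, BLOCK_SIZE) bytes and returns the remainder):

#include <stddef.h>
#include <string.h>

#define BLOCK_SIZE 16	/* GHASH_BLOCK_SIZE */

/* Hypothetical driver hook: whole blocks only, returns leftover count. */
size_t drv_update(void *state, const unsigned char *src, size_t len);

struct partial {
	unsigned char buf[BLOCK_SIZE];
	size_t used;
};

static void core_update(struct partial *p, void *state,
			const unsigned char *src, size_t len)
{
	/* Top up a previously buffered partial block first. */
	if (p->used) {
		size_t take = BLOCK_SIZE - p->used;

		if (take > len)
			take = len;
		memcpy(p->buf + p->used, src, take);
		p->used += take;
		src += take;
		len -= take;
		if (p->used == BLOCK_SIZE && len) {
			drv_update(state, p->buf, BLOCK_SIZE);
			p->used = 0;
		}
	}
	/* Feed whole blocks to the driver; stash whatever it returns. */
	if (len) {
		size_t rem = drv_update(state, src, len);

		memcpy(p->buf, src + len - rem, rem);
		p->used = rem;
	}
}

This is why struct riscv64_ghash_desc_ctx above could drop its buffer[] and bytes fields.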
diff --git a/arch/riscv/crypto/sha256-riscv64-glue.c b/arch/riscv/crypto/sha256-riscv64-glue.c
deleted file mode 100644
index 71e051e40a64..000000000000
--- a/arch/riscv/crypto/sha256-riscv64-glue.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SHA-256 and SHA-224 using the RISC-V vector crypto extensions
- *
- * Copyright (C) 2022 VRULL GmbH
- * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
- *
- * Copyright (C) 2023 SiFive, Inc.
- * Author: Jerry Shih <jerry.shih@sifive.com>
- */
-
-#include <asm/simd.h>
-#include <asm/vector.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/sha256_base.h>
-#include <linux/linkage.h>
-#include <linux/module.h>
-
-/*
- * Note: the asm function only uses the 'state' field of struct sha256_state.
- * It is assumed to be the first field.
- */
-asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
- struct sha256_state *state, const u8 *data, int num_blocks);
-
-static int riscv64_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- /*
- * Ensure struct sha256_state begins directly with the SHA-256
- * 256-bit internal state, as this is what the asm function expects.
- */
- BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
-
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- sha256_base_do_update(desc, data, len,
- sha256_transform_zvknha_or_zvknhb_zvkb);
- kernel_vector_end();
- } else {
- crypto_sha256_update(desc, data, len);
- }
- return 0;
-}
-
-static int riscv64_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- if (len)
- sha256_base_do_update(
- desc, data, len,
- sha256_transform_zvknha_or_zvknhb_zvkb);
- sha256_base_do_finalize(
- desc, sha256_transform_zvknha_or_zvknhb_zvkb);
- kernel_vector_end();
-
- return sha256_base_finish(desc, out);
- }
-
- return crypto_sha256_finup(desc, data, len, out);
-}
-
-static int riscv64_sha256_final(struct shash_desc *desc, u8 *out)
-{
- return riscv64_sha256_finup(desc, NULL, 0, out);
-}
-
-static int riscv64_sha256_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return sha256_base_init(desc) ?:
- riscv64_sha256_finup(desc, data, len, out);
-}
-
-static struct shash_alg riscv64_sha256_algs[] = {
- {
- .init = sha256_base_init,
- .update = riscv64_sha256_update,
- .final = riscv64_sha256_final,
- .finup = riscv64_sha256_finup,
- .digest = riscv64_sha256_digest,
- .descsize = sizeof(struct sha256_state),
- .digestsize = SHA256_DIGEST_SIZE,
- .base = {
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_priority = 300,
- .cra_name = "sha256",
- .cra_driver_name = "sha256-riscv64-zvknha_or_zvknhb-zvkb",
- .cra_module = THIS_MODULE,
- },
- }, {
- .init = sha224_base_init,
- .update = riscv64_sha256_update,
- .final = riscv64_sha256_final,
- .finup = riscv64_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .digestsize = SHA224_DIGEST_SIZE,
- .base = {
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_priority = 300,
- .cra_name = "sha224",
- .cra_driver_name = "sha224-riscv64-zvknha_or_zvknhb-zvkb",
- .cra_module = THIS_MODULE,
- },
- },
-};
-
-static int __init riscv64_sha256_mod_init(void)
-{
- /* Both zvknha and zvknhb provide the SHA-256 instructions. */
- if ((riscv_isa_extension_available(NULL, ZVKNHA) ||
- riscv_isa_extension_available(NULL, ZVKNHB)) &&
- riscv_isa_extension_available(NULL, ZVKB) &&
- riscv_vector_vlen() >= 128)
- return crypto_register_shashes(riscv64_sha256_algs,
- ARRAY_SIZE(riscv64_sha256_algs));
-
- return -ENODEV;
-}
-
-static void __exit riscv64_sha256_mod_exit(void)
-{
- crypto_unregister_shashes(riscv64_sha256_algs,
- ARRAY_SIZE(riscv64_sha256_algs));
-}
-
-module_init(riscv64_sha256_mod_init);
-module_exit(riscv64_sha256_mod_exit);
-
-MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)");
-MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha224");
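
With this glue gone, SHA-256/SHA-224 are no longer exposed as arch-specific shash drivers here; the additions that replace them fall outside this arch/riscv/crypto diffstat, presumably in lib/crypto, so in-kernel users reach the accelerated code through the library interface. A sketch, assuming the lib/crypto one-shot sha256() helper:

#include <crypto/sha2.h>
#include <linux/types.h>

static void sha256_example(const u8 *data, size_t len)
{
	u8 digest[SHA256_DIGEST_SIZE];

	/* One-shot digest; the library selects an accelerated block
	 * function (such as the Zvknha/Zvknhb code) when available. */
	sha256(data, len, digest);
}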
diff --git a/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S b/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
deleted file mode 100644
index 8ebcc17de4dc..000000000000
--- a/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
+++ /dev/null
@@ -1,225 +0,0 @@
-/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
-//
-// This file is dual-licensed, meaning that you can use it under your
-// choice of either of the following two licenses:
-//
-// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the Apache License 2.0 (the "License"). You can obtain
-// a copy in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-//
-// or
-//
-// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
-// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
-// Copyright 2024 Google LLC
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The generated code of this file depends on the following RISC-V extensions:
-// - RV64I
-// - RISC-V Vector ('V') with VLEN >= 128
-// - RISC-V Vector SHA-2 Secure Hash extension ('Zvknha' or 'Zvknhb')
-// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-
-#include <linux/cfi_types.h>
-
-.text
-.option arch, +zvknha, +zvkb
-
-#define STATEP a0
-#define DATA a1
-#define NUM_BLOCKS a2
-
-#define STATEP_C a3
-
-#define MASK v0
-#define INDICES v1
-#define W0 v2
-#define W1 v3
-#define W2 v4
-#define W3 v5
-#define VTMP v6
-#define FEBA v7
-#define HGDC v8
-#define K0 v10
-#define K1 v11
-#define K2 v12
-#define K3 v13
-#define K4 v14
-#define K5 v15
-#define K6 v16
-#define K7 v17
-#define K8 v18
-#define K9 v19
-#define K10 v20
-#define K11 v21
-#define K12 v22
-#define K13 v23
-#define K14 v24
-#define K15 v25
-#define PREV_FEBA v26
-#define PREV_HGDC v27
-
-// Do 4 rounds of SHA-256. w0 contains the current 4 message schedule words.
-//
-// If not all the message schedule words have been computed yet, then this also
-// computes 4 more message schedule words. w1-w3 contain the next 3 groups of 4
-// message schedule words; this macro computes the group after w3 and writes it
-// to w0. This means that the next (w0, w1, w2, w3) is the current (w1, w2, w3,
-// w0), so the caller must cycle through the registers accordingly.
-.macro sha256_4rounds last, k, w0, w1, w2, w3
- vadd.vv VTMP, \k, \w0
- vsha2cl.vv HGDC, FEBA, VTMP
- vsha2ch.vv FEBA, HGDC, VTMP
-.if !\last
- vmerge.vvm VTMP, \w2, \w1, MASK
- vsha2ms.vv \w0, VTMP, \w3
-.endif
-.endm
-
-.macro sha256_16rounds last, k0, k1, k2, k3
- sha256_4rounds \last, \k0, W0, W1, W2, W3
- sha256_4rounds \last, \k1, W1, W2, W3, W0
- sha256_4rounds \last, \k2, W2, W3, W0, W1
- sha256_4rounds \last, \k3, W3, W0, W1, W2
-.endm
-
-// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[8], const u8 *data,
-// int num_blocks);
-SYM_TYPED_FUNC_START(sha256_transform_zvknha_or_zvknhb_zvkb)
-
- // Load the round constants into K0-K15.
- vsetivli zero, 4, e32, m1, ta, ma
- la t0, K256
- vle32.v K0, (t0)
- addi t0, t0, 16
- vle32.v K1, (t0)
- addi t0, t0, 16
- vle32.v K2, (t0)
- addi t0, t0, 16
- vle32.v K3, (t0)
- addi t0, t0, 16
- vle32.v K4, (t0)
- addi t0, t0, 16
- vle32.v K5, (t0)
- addi t0, t0, 16
- vle32.v K6, (t0)
- addi t0, t0, 16
- vle32.v K7, (t0)
- addi t0, t0, 16
- vle32.v K8, (t0)
- addi t0, t0, 16
- vle32.v K9, (t0)
- addi t0, t0, 16
- vle32.v K10, (t0)
- addi t0, t0, 16
- vle32.v K11, (t0)
- addi t0, t0, 16
- vle32.v K12, (t0)
- addi t0, t0, 16
- vle32.v K13, (t0)
- addi t0, t0, 16
- vle32.v K14, (t0)
- addi t0, t0, 16
- vle32.v K15, (t0)
-
- // Setup mask for the vmerge to replace the first word (idx==0) in
- // message scheduling. There are 4 words, so an 8-bit mask suffices.
- vsetivli zero, 1, e8, m1, ta, ma
- vmv.v.i MASK, 0x01
-
- // Load the state. The state is stored as {a,b,c,d,e,f,g,h}, but we
- // need {f,e,b,a},{h,g,d,c}. The dst vtype is e32m1 and the index vtype
- // is e8mf4. We use index-load with the i8 indices {20, 16, 4, 0},
- // loaded using the 32-bit little endian value 0x00041014.
- li t0, 0x00041014
- vsetivli zero, 1, e32, m1, ta, ma
- vmv.v.x INDICES, t0
- addi STATEP_C, STATEP, 8
- vsetivli zero, 4, e32, m1, ta, ma
- vluxei8.v FEBA, (STATEP), INDICES
- vluxei8.v HGDC, (STATEP_C), INDICES
-
-.Lnext_block:
- addi NUM_BLOCKS, NUM_BLOCKS, -1
-
- // Save the previous state, as it's needed later.
- vmv.v.v PREV_FEBA, FEBA
- vmv.v.v PREV_HGDC, HGDC
-
- // Load the next 512-bit message block and endian-swap each 32-bit word.
- vle32.v W0, (DATA)
- vrev8.v W0, W0
- addi DATA, DATA, 16
- vle32.v W1, (DATA)
- vrev8.v W1, W1
- addi DATA, DATA, 16
- vle32.v W2, (DATA)
- vrev8.v W2, W2
- addi DATA, DATA, 16
- vle32.v W3, (DATA)
- vrev8.v W3, W3
- addi DATA, DATA, 16
-
- // Do the 64 rounds of SHA-256.
- sha256_16rounds 0, K0, K1, K2, K3
- sha256_16rounds 0, K4, K5, K6, K7
- sha256_16rounds 0, K8, K9, K10, K11
- sha256_16rounds 1, K12, K13, K14, K15
-
- // Add the previous state.
- vadd.vv FEBA, FEBA, PREV_FEBA
- vadd.vv HGDC, HGDC, PREV_HGDC
-
- // Repeat if more blocks remain.
- bnez NUM_BLOCKS, .Lnext_block
-
- // Store the new state and return.
- vsuxei8.v FEBA, (STATEP), INDICES
- vsuxei8.v HGDC, (STATEP_C), INDICES
- ret
-SYM_FUNC_END(sha256_transform_zvknha_or_zvknhb_zvkb)
-
-.section ".rodata"
-.p2align 2
-.type K256, @object
-K256:
- .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
- .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
- .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
- .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
- .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
- .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
- .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
- .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
- .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
- .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
- .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
- .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
- .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
- .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
- .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
- .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
-.size K256, . - K256
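
In the deleted file, vsha2ms computes four new message-schedule words per invocation; each lane evaluates the FIPS 180 SHA-256 recurrence. A scalar C reference of that schedule expansion:

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, int n)
{
	return (x >> n) | (x << (32 - n));
}

/* FIPS 180 small-sigma functions used in the SHA-256 message schedule. */
static inline uint32_t sigma0(uint32_t x)
{
	return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
}

static inline uint32_t sigma1(uint32_t x)
{
	return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
}

/* Expand the 16 block words into the full 64-entry schedule; the
 * sha256_4rounds macro produced w[t..t+3] in one vsha2ms and cycled
 * W0-W3 so the newest group always landed in the w0 argument. */
static void sha256_schedule(uint32_t w[64])
{
	for (int t = 16; t < 64; t++)
		w[t] = sigma1(w[t - 2]) + w[t - 7] +
		       sigma0(w[t - 15]) + w[t - 16];
}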
diff --git a/arch/riscv/crypto/sha512-riscv64-glue.c b/arch/riscv/crypto/sha512-riscv64-glue.c
index 43b56a08aeb5..4634fca78ae2 100644
--- a/arch/riscv/crypto/sha512-riscv64-glue.c
+++ b/arch/riscv/crypto/sha512-riscv64-glue.c
@@ -14,7 +14,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha512_base.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
#include <linux/module.h>
/*
@@ -24,8 +24,8 @@
asmlinkage void sha512_transform_zvknhb_zvkb(
struct sha512_state *state, const u8 *data, int num_blocks);
-static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static void sha512_block(struct sha512_state *state, const u8 *data,
+ int num_blocks)
{
/*
* Ensure struct sha512_state begins directly with the SHA-512
@@ -35,35 +35,24 @@ static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data,
if (crypto_simd_usable()) {
kernel_vector_begin();
- sha512_base_do_update(desc, data, len,
- sha512_transform_zvknhb_zvkb);
+ sha512_transform_zvknhb_zvkb(state, data, num_blocks);
kernel_vector_end();
} else {
- crypto_sha512_update(desc, data, len);
+ sha512_generic_block_fn(state, data, num_blocks);
}
- return 0;
}
-static int riscv64_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- if (len)
- sha512_base_do_update(desc, data, len,
- sha512_transform_zvknhb_zvkb);
- sha512_base_do_finalize(desc, sha512_transform_zvknhb_zvkb);
- kernel_vector_end();
-
- return sha512_base_finish(desc, out);
- }
-
- return crypto_sha512_finup(desc, data, len, out);
+ return sha512_base_do_update_blocks(desc, data, len, sha512_block);
}
-static int riscv64_sha512_final(struct shash_desc *desc, u8 *out)
+static int riscv64_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- return riscv64_sha512_finup(desc, NULL, 0, out);
+ sha512_base_do_finup(desc, data, len, sha512_block);
+ return sha512_base_finish(desc, out);
}
static int riscv64_sha512_digest(struct shash_desc *desc, const u8 *data,
@@ -77,14 +66,15 @@ static struct shash_alg riscv64_sha512_algs[] = {
{
.init = sha512_base_init,
.update = riscv64_sha512_update,
- .final = riscv64_sha512_final,
.finup = riscv64_sha512_finup,
.digest = riscv64_sha512_digest,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA512_DIGEST_SIZE,
.base = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_name = "sha512",
.cra_driver_name = "sha512-riscv64-zvknhb-zvkb",
.cra_module = THIS_MODULE,
@@ -92,13 +82,14 @@ static struct shash_alg riscv64_sha512_algs[] = {
}, {
.init = sha384_base_init,
.update = riscv64_sha512_update,
- .final = riscv64_sha512_final,
.finup = riscv64_sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA384_DIGEST_SIZE,
.base = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_name = "sha384",
.cra_driver_name = "sha384-riscv64-zvknhb-zvkb",
.cra_module = THIS_MODULE,
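
The updated driver still registers through the shash API, so existing users are unaffected. A sketch of exercising it (error handling trimmed; "sha512" resolves to the highest-priority implementation, which is sha512-riscv64-zvknhb-zvkb when the required extensions are present):

#include <crypto/hash.h>
#include <linux/err.h>

static int sha512_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha512", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* init + update + final in one call. */
		err = crypto_shash_digest(desc, data, len, out);
	}
	crypto_free_shash(tfm);
	return err;
}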
diff --git a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
index 3a9ae210f915..89f4a10d12dd 100644
--- a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
+++ b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
@@ -43,7 +43,7 @@
// - RISC-V Vector SHA-2 Secure Hash extension ('Zvknhb')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-#include <linux/cfi_types.h>
+#include <linux/linkage.h>
.text
.option arch, +zvknhb, +zvkb
@@ -95,7 +95,7 @@
// void sha512_transform_zvknhb_zvkb(u64 state[8], const u8 *data,
// int num_blocks);
-SYM_TYPED_FUNC_START(sha512_transform_zvknhb_zvkb)
+SYM_FUNC_START(sha512_transform_zvknhb_zvkb)
// Setup mask for the vmerge to replace the first word (idx==0) in
// message scheduling. There are 4 words, so an 8-bit mask suffices.
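
The SYM_TYPED_FUNC_START to SYM_FUNC_START change here (and in the SM3 assembly below) follows from the glue rework: the asm transform used to be passed as an indirect call target to sha512_base_do_update(), so under kCFI it needed the type hash that SYM_TYPED_FUNC_START and <linux/cfi_types.h> provide. Now the C wrappers sha512_block() and sm3_block() are the only indirect targets and the asm is called directly, so plain SYM_FUNC_START suffices.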
diff --git a/arch/riscv/crypto/sm3-riscv64-glue.c b/arch/riscv/crypto/sm3-riscv64-glue.c
index e1737a970c7c..abdfe4a63a27 100644
--- a/arch/riscv/crypto/sm3-riscv64-glue.c
+++ b/arch/riscv/crypto/sm3-riscv64-glue.c
@@ -13,8 +13,9 @@
#include <asm/vector.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
+#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
#include <linux/module.h>
/*
@@ -24,8 +25,8 @@
asmlinkage void sm3_transform_zvksh_zvkb(
struct sm3_state *state, const u8 *data, int num_blocks);
-static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static void sm3_block(struct sm3_state *state, const u8 *data,
+ int num_blocks)
{
/*
* Ensure struct sm3_state begins directly with the SM3
@@ -35,52 +36,36 @@ static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
if (crypto_simd_usable()) {
kernel_vector_begin();
- sm3_base_do_update(desc, data, len, sm3_transform_zvksh_zvkb);
+ sm3_transform_zvksh_zvkb(state, data, num_blocks);
kernel_vector_end();
} else {
- sm3_update(shash_desc_ctx(desc), data, len);
+ sm3_block_generic(state, data, num_blocks);
}
- return 0;
}
-static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct sm3_state *ctx;
-
- if (crypto_simd_usable()) {
- kernel_vector_begin();
- if (len)
- sm3_base_do_update(desc, data, len,
- sm3_transform_zvksh_zvkb);
- sm3_base_do_finalize(desc, sm3_transform_zvksh_zvkb);
- kernel_vector_end();
-
- return sm3_base_finish(desc, out);
- }
-
- ctx = shash_desc_ctx(desc);
- if (len)
- sm3_update(ctx, data, len);
- sm3_final(ctx, out);
-
- return 0;
+ return sm3_base_do_update_blocks(desc, data, len, sm3_block);
}
-static int riscv64_sm3_final(struct shash_desc *desc, u8 *out)
+static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- return riscv64_sm3_finup(desc, NULL, 0, out);
+ sm3_base_do_finup(desc, data, len, sm3_block);
+ return sm3_base_finish(desc, out);
}
static struct shash_alg riscv64_sm3_alg = {
.init = sm3_base_init,
.update = riscv64_sm3_update,
- .final = riscv64_sm3_final,
.finup = riscv64_sm3_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.digestsize = SM3_DIGEST_SIZE,
.base = {
.cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_priority = 300,
.cra_name = "sm3",
.cra_driver_name = "sm3-riscv64-zvksh-zvkb",
diff --git a/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S b/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
index a2b65d961c04..4fe754846f65 100644
--- a/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
+++ b/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
@@ -43,7 +43,7 @@
// - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
-#include <linux/cfi_types.h>
+#include <linux/linkage.h>
.text
.option arch, +zvksh, +zvkb
@@ -81,7 +81,7 @@
.endm
// void sm3_transform_zvksh_zvkb(u32 state[8], const u8 *data, int num_blocks);
-SYM_TYPED_FUNC_START(sm3_transform_zvksh_zvkb)
+SYM_FUNC_START(sm3_transform_zvksh_zvkb)
// Load the state and endian-swap each 32-bit word.
vsetivli zero, 8, e32, m2, ta, ma