Diffstat (limited to 'arch/powerpc/crypto')
-rw-r--r--  arch/powerpc/crypto/Kconfig              |   44
-rw-r--r--  arch/powerpc/crypto/Makefile             |    6
-rw-r--r--  arch/powerpc/crypto/aes.c                |    8
-rw-r--r--  arch/powerpc/crypto/aes_cbc.c            |    4
-rw-r--r--  arch/powerpc/crypto/aes_ctr.c            |    4
-rw-r--r--  arch/powerpc/crypto/aes_xts.c            |    4
-rw-r--r--  arch/powerpc/crypto/chacha-p10-glue.c    |  221
-rw-r--r--  arch/powerpc/crypto/chacha-p10le-8x.S    |  842
-rw-r--r--  arch/powerpc/crypto/ghash.c              |   91
-rw-r--r--  arch/powerpc/crypto/md5-glue.c           |   99
-rw-r--r--  arch/powerpc/crypto/poly1305-p10-glue.c  |  186
-rw-r--r--  arch/powerpc/crypto/poly1305-p10le_64.S  | 1075
-rw-r--r--  arch/powerpc/crypto/sha1-spe-glue.c      |  130
-rw-r--r--  arch/powerpc/crypto/sha1.c               |  101
-rw-r--r--  arch/powerpc/crypto/sha256-spe-asm.S     |  318
-rw-r--r--  arch/powerpc/crypto/sha256-spe-glue.c    |  235
16 files changed, 114 insertions, 3254 deletions
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 370db8192ce6..caaa359f4742 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -17,7 +17,6 @@ config CRYPTO_CURVE25519_PPC64
config CRYPTO_MD5_PPC
tristate "Digests: MD5"
- depends on PPC
select CRYPTO_HASH
help
MD5 message digest algorithm (RFC1321)
@@ -26,7 +25,6 @@ config CRYPTO_MD5_PPC
config CRYPTO_SHA1_PPC
tristate "Hash functions: SHA-1"
- depends on PPC
help
SHA-1 secure hash algorithm (FIPS 180)
@@ -34,27 +32,16 @@ config CRYPTO_SHA1_PPC
config CRYPTO_SHA1_PPC_SPE
tristate "Hash functions: SHA-1 (SPE)"
- depends on PPC && SPE
+ depends on SPE
help
SHA-1 secure hash algorithm (FIPS 180)
Architecture: powerpc using
- SPE (Signal Processing Engine) extensions
-config CRYPTO_SHA256_PPC_SPE
- tristate "Hash functions: SHA-224 and SHA-256 (SPE)"
- depends on PPC && SPE
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: powerpc using
- - SPE (Signal Processing Engine) extensions
-
config CRYPTO_AES_PPC_SPE
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)"
- depends on PPC && SPE
+ depends on SPE
select CRYPTO_SKCIPHER
help
Block ciphers: AES cipher algorithms (FIPS-197)
@@ -92,33 +79,6 @@ config CRYPTO_AES_GCM_P10
Support for cryptographic acceleration instructions on Power10 or
later CPU. This module supports stitched acceleration for AES/GCM.
-config CRYPTO_CHACHA20_P10
- tristate
- depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: PowerPC64
- - Power10 or later
- - Little-endian
-
-config CRYPTO_POLY1305_P10
- tristate "Hash functions: Poly1305 (P10 or later)"
- depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
- select CRYPTO_HASH
- select CRYPTO_LIB_POLY1305_GENERIC
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Architecture: PowerPC64
- - Power10 or later
- - Little-endian
-
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
depends on PPC64 && VSX
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 2f00b22b0823..8c2936ae466f 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -9,10 +9,7 @@ obj-$(CONFIG_CRYPTO_AES_PPC_SPE) += aes-ppc-spe.o
obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
-obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
-obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
-obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
obj-$(CONFIG_CRYPTO_CURVE25519_PPC64) += curve25519-ppc64le.o
@@ -20,10 +17,7 @@ aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-
md5-ppc-y := md5-asm.o md5-glue.o
sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
-sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
-chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
-poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
curve25519-ppc64le-y := curve25519-ppc64le-core.o curve25519-ppc64le_asm.o
diff --git a/arch/powerpc/crypto/aes.c b/arch/powerpc/crypto/aes.c
index ec06189fbf99..3f1e5e894902 100644
--- a/arch/powerpc/crypto/aes.c
+++ b/arch/powerpc/crypto/aes.c
@@ -7,15 +7,15 @@
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
#include "aesp8-ppc.h"
diff --git a/arch/powerpc/crypto/aes_cbc.c b/arch/powerpc/crypto/aes_cbc.c
index ed0debc7acb5..5f2a4f375eef 100644
--- a/arch/powerpc/crypto/aes_cbc.c
+++ b/arch/powerpc/crypto/aes_cbc.c
@@ -12,6 +12,10 @@
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
#include "aesp8-ppc.h"
diff --git a/arch/powerpc/crypto/aes_ctr.c b/arch/powerpc/crypto/aes_ctr.c
index 3da75f42529a..e27c4036e711 100644
--- a/arch/powerpc/crypto/aes_ctr.c
+++ b/arch/powerpc/crypto/aes_ctr.c
@@ -12,6 +12,10 @@
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
#include "aesp8-ppc.h"
diff --git a/arch/powerpc/crypto/aes_xts.c b/arch/powerpc/crypto/aes_xts.c
index dabbccb41550..9440e771cede 100644
--- a/arch/powerpc/crypto/aes_xts.c
+++ b/arch/powerpc/crypto/aes_xts.c
@@ -13,6 +13,10 @@
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
#include "aesp8-ppc.h"
diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c
deleted file mode 100644
index d8796decc1fb..000000000000
--- a/arch/powerpc/crypto/chacha-p10-glue.c
+++ /dev/null
@@ -1,221 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
- *
- * Copyright 2023- IBM Corp. All rights reserved.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
-#include <linux/sizes.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-
-asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src,
- unsigned int len, int nrounds);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10);
-
-static void vsx_begin(void)
-{
- preempt_disable();
- enable_kernel_vsx();
-}
-
-static void vsx_end(void)
-{
- disable_kernel_vsx();
- preempt_enable();
-}
-
-static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- unsigned int l = bytes & ~0x0FF;
-
- if (l > 0) {
- chacha_p10le_8x(state, dst, src, l, nrounds);
- bytes -= l;
- src += l;
- dst += l;
- state[12] += l / CHACHA_BLOCK_SIZE;
- }
-
- if (bytes > 0)
- chacha_crypt_generic(state, dst, src, bytes, nrounds);
-}
-
-void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
-{
- hchacha_block_generic(state, stream, nrounds);
-}
-EXPORT_SYMBOL(hchacha_block_arch);
-
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
- int nrounds)
-{
- if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE ||
- !crypto_simd_usable())
- return chacha_crypt_generic(state, dst, src, bytes, nrounds);
-
- do {
- unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
-
- vsx_begin();
- chacha_p10_do_8x(state, dst, src, todo, nrounds);
- vsx_end();
-
- bytes -= todo;
- src += todo;
- dst += todo;
- } while (bytes);
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-static int chacha_p10_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
- if (err)
- return err;
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = rounddown(nbytes, walk.stride);
-
- if (!crypto_simd_usable()) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- vsx_begin();
- chacha_p10_do_8x(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- vsx_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int chacha_p10(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_p10_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_p10(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
- hchacha_block_arch(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_p10_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_p10,
- .decrypt = chacha_p10,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_p10,
- .decrypt = xchacha_p10,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_p10,
- .decrypt = xchacha_p10,
- }
-};
-
-static int __init chacha_p10_init(void)
-{
- if (!cpu_has_feature(CPU_FTR_ARCH_31))
- return 0;
-
- static_branch_enable(&have_p10);
-
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit chacha_p10_exit(void)
-{
- if (!static_branch_likely(&have_p10))
- return;
-
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_p10_init);
-module_exit(chacha_p10_exit);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
-MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-p10");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-p10");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-p10");
diff --git a/arch/powerpc/crypto/chacha-p10le-8x.S b/arch/powerpc/crypto/chacha-p10le-8x.S
deleted file mode 100644
index 17bedb66b822..000000000000
--- a/arch/powerpc/crypto/chacha-p10le-8x.S
+++ /dev/null
@@ -1,842 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#
-# Accelerated chacha20 implementation for ppc64le.
-#
-# Copyright 2023- IBM Corp. All rights reserved
-#
-#===================================================================================
-# Written by Danny Tsen <dtsen@us.ibm.com>
-#
-# chacha_p10le_8x(u32 *state, byte *dst, const byte *src,
-# size_t len, int nrounds);
-#
-# do rounds, 8 quarter rounds
-# 1. a += b; d ^= a; d <<<= 16;
-# 2. c += d; b ^= c; b <<<= 12;
-# 3. a += b; d ^= a; d <<<= 8;
-# 4. c += d; b ^= c; b <<<= 7
-#
-# row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 16
-# row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 12
-# row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 8
-# row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 7
-#
-# 4 blocks (a b c d)
-#
-# a0 b0 c0 d0
-# a1 b1 c1 d1
-# ...
-# a4 b4 c4 d4
-# ...
-# a8 b8 c8 d8
-# ...
-# a12 b12 c12 d12
-# a13 ...
-# a14 ...
-# a15 b15 c15 d15
-#
-# Column round (v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
-# Diagonal round (v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
-#
-
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/asm-compat.h>
-#include <linux/linkage.h>
-
-.machine "any"
-.text
-
-.macro SAVE_GPR GPR OFFSET FRAME
- std \GPR,\OFFSET(\FRAME)
-.endm
-
-.macro SAVE_VRS VRS OFFSET FRAME
- li 16, \OFFSET
- stvx \VRS, 16, \FRAME
-.endm
-
-.macro SAVE_VSX VSX OFFSET FRAME
- li 16, \OFFSET
- stxvx \VSX, 16, \FRAME
-.endm
-
-.macro RESTORE_GPR GPR OFFSET FRAME
- ld \GPR,\OFFSET(\FRAME)
-.endm
-
-.macro RESTORE_VRS VRS OFFSET FRAME
- li 16, \OFFSET
- lvx \VRS, 16, \FRAME
-.endm
-
-.macro RESTORE_VSX VSX OFFSET FRAME
- li 16, \OFFSET
- lxvx \VSX, 16, \FRAME
-.endm
-
-.macro SAVE_REGS
- mflr 0
- std 0, 16(1)
- stdu 1,-752(1)
-
- SAVE_GPR 14, 112, 1
- SAVE_GPR 15, 120, 1
- SAVE_GPR 16, 128, 1
- SAVE_GPR 17, 136, 1
- SAVE_GPR 18, 144, 1
- SAVE_GPR 19, 152, 1
- SAVE_GPR 20, 160, 1
- SAVE_GPR 21, 168, 1
- SAVE_GPR 22, 176, 1
- SAVE_GPR 23, 184, 1
- SAVE_GPR 24, 192, 1
- SAVE_GPR 25, 200, 1
- SAVE_GPR 26, 208, 1
- SAVE_GPR 27, 216, 1
- SAVE_GPR 28, 224, 1
- SAVE_GPR 29, 232, 1
- SAVE_GPR 30, 240, 1
- SAVE_GPR 31, 248, 1
-
- addi 9, 1, 256
- SAVE_VRS 20, 0, 9
- SAVE_VRS 21, 16, 9
- SAVE_VRS 22, 32, 9
- SAVE_VRS 23, 48, 9
- SAVE_VRS 24, 64, 9
- SAVE_VRS 25, 80, 9
- SAVE_VRS 26, 96, 9
- SAVE_VRS 27, 112, 9
- SAVE_VRS 28, 128, 9
- SAVE_VRS 29, 144, 9
- SAVE_VRS 30, 160, 9
- SAVE_VRS 31, 176, 9
-
- SAVE_VSX 14, 192, 9
- SAVE_VSX 15, 208, 9
- SAVE_VSX 16, 224, 9
- SAVE_VSX 17, 240, 9
- SAVE_VSX 18, 256, 9
- SAVE_VSX 19, 272, 9
- SAVE_VSX 20, 288, 9
- SAVE_VSX 21, 304, 9
- SAVE_VSX 22, 320, 9
- SAVE_VSX 23, 336, 9
- SAVE_VSX 24, 352, 9
- SAVE_VSX 25, 368, 9
- SAVE_VSX 26, 384, 9
- SAVE_VSX 27, 400, 9
- SAVE_VSX 28, 416, 9
- SAVE_VSX 29, 432, 9
- SAVE_VSX 30, 448, 9
- SAVE_VSX 31, 464, 9
-.endm # SAVE_REGS
-
-.macro RESTORE_REGS
- addi 9, 1, 256
- RESTORE_VRS 20, 0, 9
- RESTORE_VRS 21, 16, 9
- RESTORE_VRS 22, 32, 9
- RESTORE_VRS 23, 48, 9
- RESTORE_VRS 24, 64, 9
- RESTORE_VRS 25, 80, 9
- RESTORE_VRS 26, 96, 9
- RESTORE_VRS 27, 112, 9
- RESTORE_VRS 28, 128, 9
- RESTORE_VRS 29, 144, 9
- RESTORE_VRS 30, 160, 9
- RESTORE_VRS 31, 176, 9
-
- RESTORE_VSX 14, 192, 9
- RESTORE_VSX 15, 208, 9
- RESTORE_VSX 16, 224, 9
- RESTORE_VSX 17, 240, 9
- RESTORE_VSX 18, 256, 9
- RESTORE_VSX 19, 272, 9
- RESTORE_VSX 20, 288, 9
- RESTORE_VSX 21, 304, 9
- RESTORE_VSX 22, 320, 9
- RESTORE_VSX 23, 336, 9
- RESTORE_VSX 24, 352, 9
- RESTORE_VSX 25, 368, 9
- RESTORE_VSX 26, 384, 9
- RESTORE_VSX 27, 400, 9
- RESTORE_VSX 28, 416, 9
- RESTORE_VSX 29, 432, 9
- RESTORE_VSX 30, 448, 9
- RESTORE_VSX 31, 464, 9
-
- RESTORE_GPR 14, 112, 1
- RESTORE_GPR 15, 120, 1
- RESTORE_GPR 16, 128, 1
- RESTORE_GPR 17, 136, 1
- RESTORE_GPR 18, 144, 1
- RESTORE_GPR 19, 152, 1
- RESTORE_GPR 20, 160, 1
- RESTORE_GPR 21, 168, 1
- RESTORE_GPR 22, 176, 1
- RESTORE_GPR 23, 184, 1
- RESTORE_GPR 24, 192, 1
- RESTORE_GPR 25, 200, 1
- RESTORE_GPR 26, 208, 1
- RESTORE_GPR 27, 216, 1
- RESTORE_GPR 28, 224, 1
- RESTORE_GPR 29, 232, 1
- RESTORE_GPR 30, 240, 1
- RESTORE_GPR 31, 248, 1
-
- addi 1, 1, 752
- ld 0, 16(1)
- mtlr 0
-.endm # RESTORE_REGS
-
-.macro QT_loop_8x
- # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
- xxlor 0, 32+25, 32+25
- xxlor 32+25, 20, 20
- vadduwm 0, 0, 4
- vadduwm 1, 1, 5
- vadduwm 2, 2, 6
- vadduwm 3, 3, 7
- vadduwm 16, 16, 20
- vadduwm 17, 17, 21
- vadduwm 18, 18, 22
- vadduwm 19, 19, 23
-
- vpermxor 12, 12, 0, 25
- vpermxor 13, 13, 1, 25
- vpermxor 14, 14, 2, 25
- vpermxor 15, 15, 3, 25
- vpermxor 28, 28, 16, 25
- vpermxor 29, 29, 17, 25
- vpermxor 30, 30, 18, 25
- vpermxor 31, 31, 19, 25
- xxlor 32+25, 0, 0
- vadduwm 8, 8, 12
- vadduwm 9, 9, 13
- vadduwm 10, 10, 14
- vadduwm 11, 11, 15
- vadduwm 24, 24, 28
- vadduwm 25, 25, 29
- vadduwm 26, 26, 30
- vadduwm 27, 27, 31
- vxor 4, 4, 8
- vxor 5, 5, 9
- vxor 6, 6, 10
- vxor 7, 7, 11
- vxor 20, 20, 24
- vxor 21, 21, 25
- vxor 22, 22, 26
- vxor 23, 23, 27
-
- xxlor 0, 32+25, 32+25
- xxlor 32+25, 21, 21
- vrlw 4, 4, 25 #
- vrlw 5, 5, 25
- vrlw 6, 6, 25
- vrlw 7, 7, 25
- vrlw 20, 20, 25 #
- vrlw 21, 21, 25
- vrlw 22, 22, 25
- vrlw 23, 23, 25
- xxlor 32+25, 0, 0
- vadduwm 0, 0, 4
- vadduwm 1, 1, 5
- vadduwm 2, 2, 6
- vadduwm 3, 3, 7
- vadduwm 16, 16, 20
- vadduwm 17, 17, 21
- vadduwm 18, 18, 22
- vadduwm 19, 19, 23
-
- xxlor 0, 32+25, 32+25
- xxlor 32+25, 22, 22
- vpermxor 12, 12, 0, 25
- vpermxor 13, 13, 1, 25
- vpermxor 14, 14, 2, 25
- vpermxor 15, 15, 3, 25
- vpermxor 28, 28, 16, 25
- vpermxor 29, 29, 17, 25
- vpermxor 30, 30, 18, 25
- vpermxor 31, 31, 19, 25
- xxlor 32+25, 0, 0
- vadduwm 8, 8, 12
- vadduwm 9, 9, 13
- vadduwm 10, 10, 14
- vadduwm 11, 11, 15
- vadduwm 24, 24, 28
- vadduwm 25, 25, 29
- vadduwm 26, 26, 30
- vadduwm 27, 27, 31
- xxlor 0, 32+28, 32+28
- xxlor 32+28, 23, 23
- vxor 4, 4, 8
- vxor 5, 5, 9
- vxor 6, 6, 10
- vxor 7, 7, 11
- vxor 20, 20, 24
- vxor 21, 21, 25
- vxor 22, 22, 26
- vxor 23, 23, 27
- vrlw 4, 4, 28 #
- vrlw 5, 5, 28
- vrlw 6, 6, 28
- vrlw 7, 7, 28
- vrlw 20, 20, 28 #
- vrlw 21, 21, 28
- vrlw 22, 22, 28
- vrlw 23, 23, 28
- xxlor 32+28, 0, 0
-
- # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
- xxlor 0, 32+25, 32+25
- xxlor 32+25, 20, 20
- vadduwm 0, 0, 5
- vadduwm 1, 1, 6
- vadduwm 2, 2, 7
- vadduwm 3, 3, 4
- vadduwm 16, 16, 21
- vadduwm 17, 17, 22
- vadduwm 18, 18, 23
- vadduwm 19, 19, 20
-
- vpermxor 15, 15, 0, 25
- vpermxor 12, 12, 1, 25
- vpermxor 13, 13, 2, 25
- vpermxor 14, 14, 3, 25
- vpermxor 31, 31, 16, 25
- vpermxor 28, 28, 17, 25
- vpermxor 29, 29, 18, 25
- vpermxor 30, 30, 19, 25
-
- xxlor 32+25, 0, 0
- vadduwm 10, 10, 15
- vadduwm 11, 11, 12
- vadduwm 8, 8, 13
- vadduwm 9, 9, 14
- vadduwm 26, 26, 31
- vadduwm 27, 27, 28
- vadduwm 24, 24, 29
- vadduwm 25, 25, 30
- vxor 5, 5, 10
- vxor 6, 6, 11
- vxor 7, 7, 8
- vxor 4, 4, 9
- vxor 21, 21, 26
- vxor 22, 22, 27
- vxor 23, 23, 24
- vxor 20, 20, 25
-
- xxlor 0, 32+25, 32+25
- xxlor 32+25, 21, 21
- vrlw 5, 5, 25
- vrlw 6, 6, 25
- vrlw 7, 7, 25
- vrlw 4, 4, 25
- vrlw 21, 21, 25
- vrlw 22, 22, 25
- vrlw 23, 23, 25
- vrlw 20, 20, 25
- xxlor 32+25, 0, 0
-
- vadduwm 0, 0, 5
- vadduwm 1, 1, 6
- vadduwm 2, 2, 7
- vadduwm 3, 3, 4
- vadduwm 16, 16, 21
- vadduwm 17, 17, 22
- vadduwm 18, 18, 23
- vadduwm 19, 19, 20
-
- xxlor 0, 32+25, 32+25
- xxlor 32+25, 22, 22
- vpermxor 15, 15, 0, 25
- vpermxor 12, 12, 1, 25
- vpermxor 13, 13, 2, 25
- vpermxor 14, 14, 3, 25
- vpermxor 31, 31, 16, 25
- vpermxor 28, 28, 17, 25
- vpermxor 29, 29, 18, 25
- vpermxor 30, 30, 19, 25
- xxlor 32+25, 0, 0
-
- vadduwm 10, 10, 15
- vadduwm 11, 11, 12
- vadduwm 8, 8, 13
- vadduwm 9, 9, 14
- vadduwm 26, 26, 31
- vadduwm 27, 27, 28
- vadduwm 24, 24, 29
- vadduwm 25, 25, 30
-
- xxlor 0, 32+28, 32+28
- xxlor 32+28, 23, 23
- vxor 5, 5, 10
- vxor 6, 6, 11
- vxor 7, 7, 8
- vxor 4, 4, 9
- vxor 21, 21, 26
- vxor 22, 22, 27
- vxor 23, 23, 24
- vxor 20, 20, 25
- vrlw 5, 5, 28
- vrlw 6, 6, 28
- vrlw 7, 7, 28
- vrlw 4, 4, 28
- vrlw 21, 21, 28
- vrlw 22, 22, 28
- vrlw 23, 23, 28
- vrlw 20, 20, 28
- xxlor 32+28, 0, 0
-.endm
-
-.macro QT_loop_4x
- # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
- vadduwm 0, 0, 4
- vadduwm 1, 1, 5
- vadduwm 2, 2, 6
- vadduwm 3, 3, 7
- vpermxor 12, 12, 0, 20
- vpermxor 13, 13, 1, 20
- vpermxor 14, 14, 2, 20
- vpermxor 15, 15, 3, 20
- vadduwm 8, 8, 12
- vadduwm 9, 9, 13
- vadduwm 10, 10, 14
- vadduwm 11, 11, 15
- vxor 4, 4, 8
- vxor 5, 5, 9
- vxor 6, 6, 10
- vxor 7, 7, 11
- vrlw 4, 4, 21
- vrlw 5, 5, 21
- vrlw 6, 6, 21
- vrlw 7, 7, 21
- vadduwm 0, 0, 4
- vadduwm 1, 1, 5
- vadduwm 2, 2, 6
- vadduwm 3, 3, 7
- vpermxor 12, 12, 0, 22
- vpermxor 13, 13, 1, 22
- vpermxor 14, 14, 2, 22
- vpermxor 15, 15, 3, 22
- vadduwm 8, 8, 12
- vadduwm 9, 9, 13
- vadduwm 10, 10, 14
- vadduwm 11, 11, 15
- vxor 4, 4, 8
- vxor 5, 5, 9
- vxor 6, 6, 10
- vxor 7, 7, 11
- vrlw 4, 4, 23
- vrlw 5, 5, 23
- vrlw 6, 6, 23
- vrlw 7, 7, 23
-
- # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
- vadduwm 0, 0, 5
- vadduwm 1, 1, 6
- vadduwm 2, 2, 7
- vadduwm 3, 3, 4
- vpermxor 15, 15, 0, 20
- vpermxor 12, 12, 1, 20
- vpermxor 13, 13, 2, 20
- vpermxor 14, 14, 3, 20
- vadduwm 10, 10, 15
- vadduwm 11, 11, 12
- vadduwm 8, 8, 13
- vadduwm 9, 9, 14
- vxor 5, 5, 10
- vxor 6, 6, 11
- vxor 7, 7, 8
- vxor 4, 4, 9
- vrlw 5, 5, 21
- vrlw 6, 6, 21
- vrlw 7, 7, 21
- vrlw 4, 4, 21
- vadduwm 0, 0, 5
- vadduwm 1, 1, 6
- vadduwm 2, 2, 7
- vadduwm 3, 3, 4
- vpermxor 15, 15, 0, 22
- vpermxor 12, 12, 1, 22
- vpermxor 13, 13, 2, 22
- vpermxor 14, 14, 3, 22
- vadduwm 10, 10, 15
- vadduwm 11, 11, 12
- vadduwm 8, 8, 13
- vadduwm 9, 9, 14
- vxor 5, 5, 10
- vxor 6, 6, 11
- vxor 7, 7, 8
- vxor 4, 4, 9
- vrlw 5, 5, 23
- vrlw 6, 6, 23
- vrlw 7, 7, 23
- vrlw 4, 4, 23
-.endm
-
-# Transpose
-.macro TP_4x a0 a1 a2 a3
- xxmrghw 10, 32+\a0, 32+\a1 # a0, a1, b0, b1
- xxmrghw 11, 32+\a2, 32+\a3 # a2, a3, b2, b3
- xxmrglw 12, 32+\a0, 32+\a1 # c0, c1, d0, d1
- xxmrglw 13, 32+\a2, 32+\a3 # c2, c3, d2, d3
- xxpermdi 32+\a0, 10, 11, 0 # a0, a1, a2, a3
- xxpermdi 32+\a1, 10, 11, 3 # b0, b1, b2, b3
- xxpermdi 32+\a2, 12, 13, 0 # c0, c1, c2, c3
- xxpermdi 32+\a3, 12, 13, 3 # d0, d1, d2, d3
-.endm
-
-# key stream = working state + state
-.macro Add_state S
- vadduwm \S+0, \S+0, 16-\S
- vadduwm \S+4, \S+4, 17-\S
- vadduwm \S+8, \S+8, 18-\S
- vadduwm \S+12, \S+12, 19-\S
-
- vadduwm \S+1, \S+1, 16-\S
- vadduwm \S+5, \S+5, 17-\S
- vadduwm \S+9, \S+9, 18-\S
- vadduwm \S+13, \S+13, 19-\S
-
- vadduwm \S+2, \S+2, 16-\S
- vadduwm \S+6, \S+6, 17-\S
- vadduwm \S+10, \S+10, 18-\S
- vadduwm \S+14, \S+14, 19-\S
-
- vadduwm \S+3, \S+3, 16-\S
- vadduwm \S+7, \S+7, 17-\S
- vadduwm \S+11, \S+11, 18-\S
- vadduwm \S+15, \S+15, 19-\S
-.endm
-
-#
-# write 256 bytes
-#
-.macro Write_256 S
- add 9, 14, 5
- add 16, 14, 4
- lxvw4x 0, 0, 9
- lxvw4x 1, 17, 9
- lxvw4x 2, 18, 9
- lxvw4x 3, 19, 9
- lxvw4x 4, 20, 9
- lxvw4x 5, 21, 9
- lxvw4x 6, 22, 9
- lxvw4x 7, 23, 9
- lxvw4x 8, 24, 9
- lxvw4x 9, 25, 9
- lxvw4x 10, 26, 9
- lxvw4x 11, 27, 9
- lxvw4x 12, 28, 9
- lxvw4x 13, 29, 9
- lxvw4x 14, 30, 9
- lxvw4x 15, 31, 9
-
- xxlxor \S+32, \S+32, 0
- xxlxor \S+36, \S+36, 1
- xxlxor \S+40, \S+40, 2
- xxlxor \S+44, \S+44, 3
- xxlxor \S+33, \S+33, 4
- xxlxor \S+37, \S+37, 5
- xxlxor \S+41, \S+41, 6
- xxlxor \S+45, \S+45, 7
- xxlxor \S+34, \S+34, 8
- xxlxor \S+38, \S+38, 9
- xxlxor \S+42, \S+42, 10
- xxlxor \S+46, \S+46, 11
- xxlxor \S+35, \S+35, 12
- xxlxor \S+39, \S+39, 13
- xxlxor \S+43, \S+43, 14
- xxlxor \S+47, \S+47, 15
-
- stxvw4x \S+32, 0, 16
- stxvw4x \S+36, 17, 16
- stxvw4x \S+40, 18, 16
- stxvw4x \S+44, 19, 16
-
- stxvw4x \S+33, 20, 16
- stxvw4x \S+37, 21, 16
- stxvw4x \S+41, 22, 16
- stxvw4x \S+45, 23, 16
-
- stxvw4x \S+34, 24, 16
- stxvw4x \S+38, 25, 16
- stxvw4x \S+42, 26, 16
- stxvw4x \S+46, 27, 16
-
- stxvw4x \S+35, 28, 16
- stxvw4x \S+39, 29, 16
- stxvw4x \S+43, 30, 16
- stxvw4x \S+47, 31, 16
-
-.endm
-
-#
-# chacha_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds);
-#
-SYM_FUNC_START(chacha_p10le_8x)
-.align 5
- cmpdi 6, 0
- ble Out_no_chacha
-
- SAVE_REGS
-
- # r17 - r31 mainly for Write_256 macro.
- li 17, 16
- li 18, 32
- li 19, 48
- li 20, 64
- li 21, 80
- li 22, 96
- li 23, 112
- li 24, 128
- li 25, 144
- li 26, 160
- li 27, 176
- li 28, 192
- li 29, 208
- li 30, 224
- li 31, 240
-
- mr 15, 6 # len
- li 14, 0 # offset to inp and outp
-
- lxvw4x 48, 0, 3 # vr16, constants
- lxvw4x 49, 17, 3 # vr17, key 1
- lxvw4x 50, 18, 3 # vr18, key 2
- lxvw4x 51, 19, 3 # vr19, counter, nonce
-
- # create (0, 1, 2, 3) counters
- vspltisw 0, 0
- vspltisw 1, 1
- vspltisw 2, 2
- vspltisw 3, 3
- vmrghw 4, 0, 1
- vmrglw 5, 2, 3
- vsldoi 30, 4, 5, 8 # vr30 counter, 4 (0, 1, 2, 3)
-
- vspltisw 21, 12
- vspltisw 23, 7
-
- addis 11, 2, permx@toc@ha
- addi 11, 11, permx@toc@l
- lxvw4x 32+20, 0, 11
- lxvw4x 32+22, 17, 11
-
- sradi 8, 7, 1
-
- mtctr 8
-
- # save constants to vsx
- xxlor 16, 48, 48
- xxlor 17, 49, 49
- xxlor 18, 50, 50
- xxlor 19, 51, 51
-
- vspltisw 25, 4
- vspltisw 26, 8
-
- xxlor 25, 32+26, 32+26
- xxlor 24, 32+25, 32+25
-
- vadduwm 31, 30, 25 # counter = (0, 1, 2, 3) + (4, 4, 4, 4)
- xxlor 30, 32+30, 32+30
- xxlor 31, 32+31, 32+31
-
- xxlor 20, 32+20, 32+20
- xxlor 21, 32+21, 32+21
- xxlor 22, 32+22, 32+22
- xxlor 23, 32+23, 32+23
-
- cmpdi 6, 512
- blt Loop_last
-
-Loop_8x:
- xxspltw 32+0, 16, 0
- xxspltw 32+1, 16, 1
- xxspltw 32+2, 16, 2
- xxspltw 32+3, 16, 3
-
- xxspltw 32+4, 17, 0
- xxspltw 32+5, 17, 1
- xxspltw 32+6, 17, 2
- xxspltw 32+7, 17, 3
- xxspltw 32+8, 18, 0
- xxspltw 32+9, 18, 1
- xxspltw 32+10, 18, 2
- xxspltw 32+11, 18, 3
- xxspltw 32+12, 19, 0
- xxspltw 32+13, 19, 1
- xxspltw 32+14, 19, 2
- xxspltw 32+15, 19, 3
- vadduwm 12, 12, 30 # increase counter
-
- xxspltw 32+16, 16, 0
- xxspltw 32+17, 16, 1
- xxspltw 32+18, 16, 2
- xxspltw 32+19, 16, 3
-
- xxspltw 32+20, 17, 0
- xxspltw 32+21, 17, 1
- xxspltw 32+22, 17, 2
- xxspltw 32+23, 17, 3
- xxspltw 32+24, 18, 0
- xxspltw 32+25, 18, 1
- xxspltw 32+26, 18, 2
- xxspltw 32+27, 18, 3
- xxspltw 32+28, 19, 0
- xxspltw 32+29, 19, 1
- vadduwm 28, 28, 31 # increase counter
- xxspltw 32+30, 19, 2
- xxspltw 32+31, 19, 3
-
-.align 5
-quarter_loop_8x:
- QT_loop_8x
-
- bdnz quarter_loop_8x
-
- xxlor 0, 32+30, 32+30
- xxlor 32+30, 30, 30
- vadduwm 12, 12, 30
- xxlor 32+30, 0, 0
- TP_4x 0, 1, 2, 3
- TP_4x 4, 5, 6, 7
- TP_4x 8, 9, 10, 11
- TP_4x 12, 13, 14, 15
-
- xxlor 0, 48, 48
- xxlor 1, 49, 49
- xxlor 2, 50, 50
- xxlor 3, 51, 51
- xxlor 48, 16, 16
- xxlor 49, 17, 17
- xxlor 50, 18, 18
- xxlor 51, 19, 19
- Add_state 0
- xxlor 48, 0, 0
- xxlor 49, 1, 1
- xxlor 50, 2, 2
- xxlor 51, 3, 3
- Write_256 0
- addi 14, 14, 256 # offset +=256
- addi 15, 15, -256 # len -=256
-
- xxlor 5, 32+31, 32+31
- xxlor 32+31, 31, 31
- vadduwm 28, 28, 31
- xxlor 32+31, 5, 5
- TP_4x 16+0, 16+1, 16+2, 16+3
- TP_4x 16+4, 16+5, 16+6, 16+7
- TP_4x 16+8, 16+9, 16+10, 16+11
- TP_4x 16+12, 16+13, 16+14, 16+15
-
- xxlor 32, 16, 16
- xxlor 33, 17, 17
- xxlor 34, 18, 18
- xxlor 35, 19, 19
- Add_state 16
- Write_256 16
- addi 14, 14, 256 # offset +=256
- addi 15, 15, -256 # len -=256
-
- xxlor 32+24, 24, 24
- xxlor 32+25, 25, 25
- xxlor 32+30, 30, 30
- vadduwm 30, 30, 25
- vadduwm 31, 30, 24
- xxlor 30, 32+30, 32+30
- xxlor 31, 32+31, 32+31
-
- cmpdi 15, 0
- beq Out_loop
-
- cmpdi 15, 512
- blt Loop_last
-
- mtctr 8
- b Loop_8x
-
-Loop_last:
- lxvw4x 48, 0, 3 # vr16, constants
- lxvw4x 49, 17, 3 # vr17, key 1
- lxvw4x 50, 18, 3 # vr18, key 2
- lxvw4x 51, 19, 3 # vr19, counter, nonce
-
- vspltisw 21, 12
- vspltisw 23, 7
- addis 11, 2, permx@toc@ha
- addi 11, 11, permx@toc@l
- lxvw4x 32+20, 0, 11
- lxvw4x 32+22, 17, 11
-
- sradi 8, 7, 1
- mtctr 8
-
-Loop_4x:
- vspltw 0, 16, 0
- vspltw 1, 16, 1
- vspltw 2, 16, 2
- vspltw 3, 16, 3
-
- vspltw 4, 17, 0
- vspltw 5, 17, 1
- vspltw 6, 17, 2
- vspltw 7, 17, 3
- vspltw 8, 18, 0
- vspltw 9, 18, 1
- vspltw 10, 18, 2
- vspltw 11, 18, 3
- vspltw 12, 19, 0
- vadduwm 12, 12, 30 # increase counter
- vspltw 13, 19, 1
- vspltw 14, 19, 2
- vspltw 15, 19, 3
-
-.align 5
-quarter_loop:
- QT_loop_4x
-
- bdnz quarter_loop
-
- vadduwm 12, 12, 30
- TP_4x 0, 1, 2, 3
- TP_4x 4, 5, 6, 7
- TP_4x 8, 9, 10, 11
- TP_4x 12, 13, 14, 15
-
- Add_state 0
- Write_256 0
- addi 14, 14, 256 # offset += 256
- addi 15, 15, -256 # len -= 256
-
- # Update state counter
- vspltisw 25, 4
- vadduwm 30, 30, 25
-
- cmpdi 15, 0
- beq Out_loop
- cmpdi 15, 256
- blt Out_loop
-
- mtctr 8
- b Loop_4x
-
-Out_loop:
- RESTORE_REGS
- blr
-
-Out_no_chacha:
- li 3, 0
- blr
-SYM_FUNC_END(chacha_p10le_8x)
-
-SYM_DATA_START_LOCAL(PERMX)
-.align 5
-permx:
-.long 0x22330011, 0x66774455, 0xaabb8899, 0xeeffccdd
-.long 0x11223300, 0x55667744, 0x99aabb88, 0xddeeffcc
-SYM_DATA_END(PERMX)
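The round description in the deleted file's header is the standard ChaCha quarter round. A plain-C rendering of that reference operation (a sketch of the scalar algorithm, not of the 8-way vector code) makes the four add/xor/rotate steps explicit:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, int r)
{
        return (x << r) | (x >> (32 - r));
}

/* One ChaCha quarter round, exactly the four steps listed in the header:
 *   1. a += b; d ^= a; d <<<= 16;
 *   2. c += d; b ^= c; b <<<= 12;
 *   3. a += b; d ^= a; d <<<= 8;
 *   4. c += d; b ^= c; b <<<= 7;
 */
void chacha_quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
        *a += *b; *d ^= *a; *d = rotl32(*d, 16);
        *c += *d; *b ^= *c; *b = rotl32(*b, 12);
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);
}

The vector code performed this same round across many states at once: a "column" pass over (v0, v4, v8, v12) and friends, then a "diagonal" pass over (v0, v5, v10, v15) and friends, with the 16- and 8-bit rotates done through vpermxor byte permutations and the 12- and 7-bit rotates through vrlw.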
diff --git a/arch/powerpc/crypto/ghash.c b/arch/powerpc/crypto/ghash.c
index 77eca20bc7ac..7308735bdb33 100644
--- a/arch/powerpc/crypto/ghash.c
+++ b/arch/powerpc/crypto/ghash.c
@@ -11,19 +11,18 @@
* Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <asm/simd.h>
+#include "aesp8-ppc.h"
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
-#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/b128ops.h>
-#include "aesp8-ppc.h"
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
@@ -39,15 +38,12 @@ struct p8_ghash_ctx {
struct p8_ghash_desc_ctx {
u64 shash[2];
- u8 buffer[GHASH_DIGEST_SIZE];
- int bytes;
};
static int p8_ghash_init(struct shash_desc *desc)
{
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- dctx->bytes = 0;
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
return 0;
}
@@ -74,27 +70,30 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
}
static inline void __ghash_block(struct p8_ghash_ctx *ctx,
- struct p8_ghash_desc_ctx *dctx)
+ struct p8_ghash_desc_ctx *dctx,
+ const u8 *src)
{
if (crypto_simd_usable()) {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
+ gcm_ghash_p8(dctx->shash, ctx->htable, src, GHASH_BLOCK_SIZE);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
} else {
- crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
+ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
gf128mul_lle((be128 *)dctx->shash, &ctx->key);
}
}
-static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
- struct p8_ghash_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
+static inline int __ghash_blocks(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
{
+ int remain = srclen - round_down(srclen, GHASH_BLOCK_SIZE);
+
+ srclen -= remain;
if (crypto_simd_usable()) {
preempt_disable();
pagefault_disable();
@@ -105,62 +104,38 @@ static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
pagefault_enable();
preempt_enable();
} else {
- while (srclen >= GHASH_BLOCK_SIZE) {
+ do {
crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
gf128mul_lle((be128 *)dctx->shash, &ctx->key);
srclen -= GHASH_BLOCK_SIZE;
src += GHASH_BLOCK_SIZE;
- }
+ } while (srclen);
}
+
+ return remain;
}
static int p8_ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- unsigned int len;
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src,
- srclen);
- dctx->bytes += srclen;
- return 0;
- }
- memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_DIGEST_SIZE - dctx->bytes);
-
- __ghash_block(ctx, dctx);
-
- src += GHASH_DIGEST_SIZE - dctx->bytes;
- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
- if (len) {
- __ghash_blocks(ctx, dctx, src, len);
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
- }
- return 0;
+ return __ghash_blocks(ctx, dctx, src, srclen);
}
-static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+static int p8_ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- int i;
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
- dctx->buffer[i] = 0;
- __ghash_block(ctx, dctx);
- dctx->bytes = 0;
+ if (len) {
+ u8 buf[GHASH_BLOCK_SIZE] = {};
+
+ memcpy(buf, src, len);
+ __ghash_block(ctx, dctx, buf);
+ memzero_explicit(buf, sizeof(buf));
}
memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
return 0;
@@ -170,14 +145,14 @@ struct shash_alg p8_ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = p8_ghash_init,
.update = p8_ghash_update,
- .final = p8_ghash_final,
+ .finup = p8_ghash_finup,
.setkey = p8_ghash_setkey,
- .descsize = sizeof(struct p8_ghash_desc_ctx)
- + sizeof(struct ghash_desc_ctx),
+ .descsize = sizeof(struct p8_ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
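The ghash conversion above moves the driver to the block-only hash model signalled by CRYPTO_AHASH_ALG_BLOCK_ONLY: update() now consumes only whole blocks and returns the count of trailing bytes it left unconsumed (the crypto API buffers those), while finup() zero-pads the final partial block itself. A minimal self-contained model of that calling convention in plain C, with a trivial XOR stand-in (demo_block) for the real GF(2^128) multiply:

#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 16

struct demo_ctx {
        uint8_t digest[BLOCK_SIZE];
};

/* Stand-in for one GHASH block step (the real one multiplies in GF(2^128)). */
static void demo_block(struct demo_ctx *ctx, const uint8_t *src)
{
        for (int i = 0; i < BLOCK_SIZE; i++)
                ctx->digest[i] ^= src[i];
}

/* Block-only update: process whole blocks, report the unconsumed tail. */
int demo_update(struct demo_ctx *ctx, const uint8_t *src, unsigned int len)
{
        unsigned int remain = len % BLOCK_SIZE;

        for (len -= remain; len; len -= BLOCK_SIZE, src += BLOCK_SIZE)
                demo_block(ctx, src);
        return remain;          /* the API rebuffers these bytes */
}

/* finup: zero-pad the final partial block, then emit the digest. */
void demo_finup(struct demo_ctx *ctx, const uint8_t *src, unsigned int len,
                uint8_t *out)
{
        if (len) {
                uint8_t buf[BLOCK_SIZE] = { 0 };

                memcpy(buf, src, len);
                demo_block(ctx, buf);
        }
        memcpy(out, ctx->digest, BLOCK_SIZE);
}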
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index c24f605033bd..204440a90cd8 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -8,25 +8,13 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/md5.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
extern void ppc_md5_transform(u32 *state, const u8 *src, u32 blocks);
-static inline void ppc_md5_clear_context(struct md5_state *sctx)
-{
- int count = sizeof(struct md5_state) >> 2;
- u32 *ptr = (u32 *)sctx;
-
- /* make sure we can clear the fast way */
- BUILD_BUG_ON(sizeof(struct md5_state) % 4);
- do { *ptr++ = 0; } while (--count);
-}
-
static int ppc_md5_init(struct shash_desc *desc)
{
struct md5_state *sctx = shash_desc_ctx(desc);
@@ -44,79 +32,34 @@ static int ppc_md5_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct md5_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->byte_count & 0x3f;
- unsigned int avail = 64 - offset;
- const u8 *src = data;
- sctx->byte_count += len;
-
- if (avail > len) {
- memcpy((char *)sctx->block + offset, src, len);
- return 0;
- }
-
- if (offset) {
- memcpy((char *)sctx->block + offset, src, avail);
- ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1);
- len -= avail;
- src += avail;
- }
-
- if (len > 63) {
- ppc_md5_transform(sctx->hash, src, len >> 6);
- src += len & ~0x3f;
- len &= 0x3f;
- }
-
- memcpy((char *)sctx->block, src, len);
- return 0;
+ sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE);
+ ppc_md5_transform(sctx->hash, data, len >> 6);
+ return len - round_down(len, MD5_HMAC_BLOCK_SIZE);
}
-static int ppc_md5_final(struct shash_desc *desc, u8 *out)
+static int ppc_md5_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int offset, u8 *out)
{
struct md5_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->byte_count & 0x3f;
- const u8 *src = (const u8 *)sctx->block;
- u8 *p = (u8 *)src + offset;
- int padlen = 55 - offset;
- __le64 *pbits = (__le64 *)((char *)sctx->block + 56);
+ __le64 block[MD5_BLOCK_WORDS] = {};
+ u8 *p = memcpy(block, src, offset);
__le32 *dst = (__le32 *)out;
+ __le64 *pbits;
+ src = p;
+ p += offset;
*p++ = 0x80;
-
- if (padlen < 0) {
- memset(p, 0x00, padlen + sizeof (u64));
- ppc_md5_transform(sctx->hash, src, 1);
- p = (char *)sctx->block;
- padlen = 56;
- }
-
- memset(p, 0, padlen);
+ sctx->byte_count += offset;
+ pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 1 : 2)) - 1];
*pbits = cpu_to_le64(sctx->byte_count << 3);
- ppc_md5_transform(sctx->hash, src, 1);
+ ppc_md5_transform(sctx->hash, src, (pbits - block + 1) / 8);
+ memzero_explicit(block, sizeof(block));
dst[0] = cpu_to_le32(sctx->hash[0]);
dst[1] = cpu_to_le32(sctx->hash[1]);
dst[2] = cpu_to_le32(sctx->hash[2]);
dst[3] = cpu_to_le32(sctx->hash[3]);
-
- ppc_md5_clear_context(sctx);
- return 0;
-}
-
-static int ppc_md5_export(struct shash_desc *desc, void *out)
-{
- struct md5_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int ppc_md5_import(struct shash_desc *desc, const void *in)
-{
- struct md5_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
return 0;
}
@@ -124,15 +67,13 @@ static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = ppc_md5_init,
.update = ppc_md5_update,
- .final = ppc_md5_final,
- .export = ppc_md5_export,
- .import = ppc_md5_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
+ .finup = ppc_md5_finup,
+ .descsize = MD5_STATE_SIZE,
.base = {
.cra_name = "md5",
.cra_driver_name= "md5-ppc",
.cra_priority = 200,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
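The index arithmetic in the new ppc_md5_finup() is terse: block[] is two zeroed 64-byte blocks, and the bit-length word lands at the end of the first block when the tail plus the 0x80 marker and the 8-byte length still fit there (offset <= 55), otherwise at the end of the second. A standalone sketch of the same MD5 padding rule (names here are illustrative, not the kernel's):

#include <stdint.h>
#include <string.h>

#define MD5_BLOCK_BYTES 64

/*
 * MD5 tail padding: copy the leftover bytes, append the 0x80 marker,
 * zero-fill, and store the total message length in bits (little-endian)
 * in the last 8 bytes.  Returns how many 64-byte blocks to transform.
 */
int md5_pad(uint8_t block[2 * MD5_BLOCK_BYTES], const uint8_t *tail,
            unsigned int offset, uint64_t total_bytes)
{
        /* 0x80 plus the 8-byte length need 9 bytes, so offset 55 is the
         * last one for which a single 64-byte block still suffices. */
        int blocks = (offset > 55) ? 2 : 1;
        uint64_t bits = total_bytes << 3;

        memset(block, 0, 2 * MD5_BLOCK_BYTES);
        memcpy(block, tail, offset);
        block[offset] = 0x80;
        for (int i = 0; i < 8; i++)
                block[blocks * MD5_BLOCK_BYTES - 8 + i] = (uint8_t)(bits >> (8 * i));
        return blocks;
}

ppc_md5_transform() is then run over one or two blocks accordingly, which is what the (pbits - block + 1) / 8 expression computes.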
diff --git a/arch/powerpc/crypto/poly1305-p10-glue.c b/arch/powerpc/crypto/poly1305-p10-glue.c
deleted file mode 100644
index 369686e9370b..000000000000
--- a/arch/powerpc/crypto/poly1305-p10-glue.c
+++ /dev/null
@@ -1,186 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Poly1305 authenticator algorithm, RFC7539.
- *
- * Copyright 2023- IBM Corp. All rights reserved.
- */
-
-#include <crypto/algapi.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/jump_label.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <crypto/internal/simd.h>
-#include <linux/cpufeature.h>
-#include <linux/unaligned.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-
-asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen);
-asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit);
-asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst);
-
-static void vsx_begin(void)
-{
- preempt_disable();
- enable_kernel_vsx();
-}
-
-static void vsx_end(void)
-{
- disable_kernel_vsx();
- preempt_enable();
-}
-
-static int crypto_poly1305_p10_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- poly1305_core_init(&dctx->h);
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
- const u8 *inp, unsigned int len)
-{
- unsigned int acc = 0;
-
- if (unlikely(!dctx->sset)) {
- if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
- struct poly1305_core_key *key = &dctx->core_r;
-
- key->key.r64[0] = get_unaligned_le64(&inp[0]);
- key->key.r64[1] = get_unaligned_le64(&inp[8]);
- inp += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- acc += POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (len >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(&inp[0]);
- dctx->s[1] = get_unaligned_le32(&inp[4]);
- dctx->s[2] = get_unaligned_le32(&inp[8]);
- dctx->s[3] = get_unaligned_le32(&inp[12]);
- acc += POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return acc;
-}
-
-static int crypto_poly1305_p10_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int bytes, used;
-
- if (unlikely(dctx->buflen)) {
- bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- srclen -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE))) {
- vsx_begin();
- poly1305_64s(&dctx->h, dctx->buf,
- POLY1305_BLOCK_SIZE, 1);
- vsx_end();
- }
- dctx->buflen = 0;
- }
- }
-
- if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
- used = crypto_poly1305_setdctxkey(dctx, src, bytes);
- if (likely(used)) {
- srclen -= used;
- src += used;
- }
- if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) {
- vsx_begin();
- poly1305_p10le_4blocks(&dctx->h, src, srclen);
- vsx_end();
- src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4));
- srclen %= POLY1305_BLOCK_SIZE * 4;
- }
- while (srclen >= POLY1305_BLOCK_SIZE) {
- vsx_begin();
- poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1);
- vsx_end();
- srclen -= POLY1305_BLOCK_SIZE;
- src += POLY1305_BLOCK_SIZE;
- }
- }
-
- if (unlikely(srclen)) {
- dctx->buflen = srclen;
- memcpy(dctx->buf, src, srclen);
- }
-
- return 0;
-}
-
-static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- if ((dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- vsx_begin();
- poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- vsx_end();
- dctx->buflen = 0;
- }
-
- poly1305_emit_64(&dctx->h, &dctx->s, dst);
- return 0;
-}
-
-static struct shash_alg poly1305_alg = {
- .digestsize = POLY1305_DIGEST_SIZE,
- .init = crypto_poly1305_p10_init,
- .update = crypto_poly1305_p10_update,
- .final = crypto_poly1305_p10_final,
- .descsize = sizeof(struct poly1305_desc_ctx),
- .base = {
- .cra_name = "poly1305",
- .cra_driver_name = "poly1305-p10",
- .cra_priority = 300,
- .cra_blocksize = POLY1305_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init poly1305_p10_init(void)
-{
- return crypto_register_shash(&poly1305_alg);
-}
-
-static void __exit poly1305_p10_exit(void)
-{
- crypto_unregister_shash(&poly1305_alg);
-}
-
-module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init);
-module_exit(poly1305_p10_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
-MODULE_DESCRIPTION("Optimized Poly1305 for P10");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-p10");
diff --git a/arch/powerpc/crypto/poly1305-p10le_64.S b/arch/powerpc/crypto/poly1305-p10le_64.S
deleted file mode 100644
index a3c1987f1ecd..000000000000
--- a/arch/powerpc/crypto/poly1305-p10le_64.S
+++ /dev/null
@@ -1,1075 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#
-# Accelerated poly1305 implementation for ppc64le.
-#
-# Copyright 2023- IBM Corp. All rights reserved
-#
-#===================================================================================
-# Written by Danny Tsen <dtsen@us.ibm.com>
-#
-# Poly1305 - this version mainly uses vector/VSX/scalar code
-# - 26-bit limbs
-# - handles multiple 64-byte blocks
-#
-# Block size 16 bytes
-# key = (r, s)
-# clamp r &= 0x0FFFFFFC0FFFFFFC 0x0FFFFFFC0FFFFFFF
-# p = 2^130 - 5
-# a += m
-# a = (r + a) % p
-# a += s
-#
-# Improve performance by breaking down polynominal to the sum of products with
-# h4 = m1 * r⁴ + m2 * r³ + m3 * r² + m4 * r
-#
-# 07/22/21 - this revison based on the above sum of products. Setup r^4, r^3, r^2, r and s3, s2, s1, s0
-# to 9 vectors for multiplications.
-#
-# setup r^4, r^3, r^2, r vectors
-# vs [r^1, r^3, r^2, r^4]
-# vs0 = [r0,.....]
-# vs1 = [r1,.....]
-# vs2 = [r2,.....]
-# vs3 = [r3,.....]
-# vs4 = [r4,.....]
-# vs5 = [r1*5,...]
-# vs6 = [r2*5,...]
-# vs7 = [r3*5,...]
-# vs8 = [r4*5,...]
-#
-# Each word in a vector consists of a member of "r/s" in [a * r/s].
-#
-# r0, r4*5, r3*5, r2*5, r1*5;
-# r1, r0, r4*5, r3*5, r2*5;
-# r2, r1, r0, r4*5, r3*5;
-# r3, r2, r1, r0, r4*5;
-# r4, r3, r2, r1, r0 ;
-#
-#
-# poly1305_p10le_4blocks(uint8_t *k, const uint8_t *m, uint32_t mlen)
-# k = 32 bytes key
-# r3 = k (r, s)
-# r4 = m
-# r5 = mlen
-#
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/asm-compat.h>
-#include <linux/linkage.h>
-
-.machine "any"
-
-.text
-
-.macro SAVE_GPR GPR OFFSET FRAME
- std \GPR,\OFFSET(\FRAME)
-.endm
-
-.macro SAVE_VRS VRS OFFSET FRAME
- li 16, \OFFSET
- stvx \VRS, 16, \FRAME
-.endm
-
-.macro SAVE_VSX VSX OFFSET FRAME
- li 16, \OFFSET
- stxvx \VSX, 16, \FRAME
-.endm
-
-.macro RESTORE_GPR GPR OFFSET FRAME
- ld \GPR,\OFFSET(\FRAME)
-.endm
-
-.macro RESTORE_VRS VRS OFFSET FRAME
- li 16, \OFFSET
- lvx \VRS, 16, \FRAME
-.endm
-
-.macro RESTORE_VSX VSX OFFSET FRAME
- li 16, \OFFSET
- lxvx \VSX, 16, \FRAME
-.endm
-
-.macro SAVE_REGS
- mflr 0
- std 0, 16(1)
- stdu 1,-752(1)
-
- SAVE_GPR 14, 112, 1
- SAVE_GPR 15, 120, 1
- SAVE_GPR 16, 128, 1
- SAVE_GPR 17, 136, 1
- SAVE_GPR 18, 144, 1
- SAVE_GPR 19, 152, 1
- SAVE_GPR 20, 160, 1
- SAVE_GPR 21, 168, 1
- SAVE_GPR 22, 176, 1
- SAVE_GPR 23, 184, 1
- SAVE_GPR 24, 192, 1
- SAVE_GPR 25, 200, 1
- SAVE_GPR 26, 208, 1
- SAVE_GPR 27, 216, 1
- SAVE_GPR 28, 224, 1
- SAVE_GPR 29, 232, 1
- SAVE_GPR 30, 240, 1
- SAVE_GPR 31, 248, 1
-
- addi 9, 1, 256
- SAVE_VRS 20, 0, 9
- SAVE_VRS 21, 16, 9
- SAVE_VRS 22, 32, 9
- SAVE_VRS 23, 48, 9
- SAVE_VRS 24, 64, 9
- SAVE_VRS 25, 80, 9
- SAVE_VRS 26, 96, 9
- SAVE_VRS 27, 112, 9
- SAVE_VRS 28, 128, 9
- SAVE_VRS 29, 144, 9
- SAVE_VRS 30, 160, 9
- SAVE_VRS 31, 176, 9
-
- SAVE_VSX 14, 192, 9
- SAVE_VSX 15, 208, 9
- SAVE_VSX 16, 224, 9
- SAVE_VSX 17, 240, 9
- SAVE_VSX 18, 256, 9
- SAVE_VSX 19, 272, 9
- SAVE_VSX 20, 288, 9
- SAVE_VSX 21, 304, 9
- SAVE_VSX 22, 320, 9
- SAVE_VSX 23, 336, 9
- SAVE_VSX 24, 352, 9
- SAVE_VSX 25, 368, 9
- SAVE_VSX 26, 384, 9
- SAVE_VSX 27, 400, 9
- SAVE_VSX 28, 416, 9
- SAVE_VSX 29, 432, 9
- SAVE_VSX 30, 448, 9
- SAVE_VSX 31, 464, 9
-.endm # SAVE_REGS
-
-.macro RESTORE_REGS
- addi 9, 1, 256
- RESTORE_VRS 20, 0, 9
- RESTORE_VRS 21, 16, 9
- RESTORE_VRS 22, 32, 9
- RESTORE_VRS 23, 48, 9
- RESTORE_VRS 24, 64, 9
- RESTORE_VRS 25, 80, 9
- RESTORE_VRS 26, 96, 9
- RESTORE_VRS 27, 112, 9
- RESTORE_VRS 28, 128, 9
- RESTORE_VRS 29, 144, 9
- RESTORE_VRS 30, 160, 9
- RESTORE_VRS 31, 176, 9
-
- RESTORE_VSX 14, 192, 9
- RESTORE_VSX 15, 208, 9
- RESTORE_VSX 16, 224, 9
- RESTORE_VSX 17, 240, 9
- RESTORE_VSX 18, 256, 9
- RESTORE_VSX 19, 272, 9
- RESTORE_VSX 20, 288, 9
- RESTORE_VSX 21, 304, 9
- RESTORE_VSX 22, 320, 9
- RESTORE_VSX 23, 336, 9
- RESTORE_VSX 24, 352, 9
- RESTORE_VSX 25, 368, 9
- RESTORE_VSX 26, 384, 9
- RESTORE_VSX 27, 400, 9
- RESTORE_VSX 28, 416, 9
- RESTORE_VSX 29, 432, 9
- RESTORE_VSX 30, 448, 9
- RESTORE_VSX 31, 464, 9
-
- RESTORE_GPR 14, 112, 1
- RESTORE_GPR 15, 120, 1
- RESTORE_GPR 16, 128, 1
- RESTORE_GPR 17, 136, 1
- RESTORE_GPR 18, 144, 1
- RESTORE_GPR 19, 152, 1
- RESTORE_GPR 20, 160, 1
- RESTORE_GPR 21, 168, 1
- RESTORE_GPR 22, 176, 1
- RESTORE_GPR 23, 184, 1
- RESTORE_GPR 24, 192, 1
- RESTORE_GPR 25, 200, 1
- RESTORE_GPR 26, 208, 1
- RESTORE_GPR 27, 216, 1
- RESTORE_GPR 28, 224, 1
- RESTORE_GPR 29, 232, 1
- RESTORE_GPR 30, 240, 1
- RESTORE_GPR 31, 248, 1
-
- addi 1, 1, 752
- ld 0, 16(1)
- mtlr 0
-.endm # RESTORE_REGS
-
-#
-# p[0] = a0*r0 + a1*r4*5 + a2*r3*5 + a3*r2*5 + a4*r1*5;
-# p[1] = a0*r1 + a1*r0 + a2*r4*5 + a3*r3*5 + a4*r2*5;
-# p[2] = a0*r2 + a1*r1 + a2*r0 + a3*r4*5 + a4*r3*5;
-# p[3] = a0*r3 + a1*r2 + a2*r1 + a3*r0 + a4*r4*5;
-# p[4] = a0*r4 + a1*r3 + a2*r2 + a3*r1 + a4*r0 ;
-#
-# [r^2, r^3, r^1, r^4]
-# [m3, m2, m4, m1]
-#
-# multiply odd and even words
-.macro mul_odd
- vmulouw 14, 4, 26
- vmulouw 10, 5, 3
- vmulouw 11, 6, 2
- vmulouw 12, 7, 1
- vmulouw 13, 8, 0
- vmulouw 15, 4, 27
- vaddudm 14, 14, 10
- vaddudm 14, 14, 11
- vmulouw 10, 5, 26
- vmulouw 11, 6, 3
- vaddudm 14, 14, 12
- vaddudm 14, 14, 13 # x0
- vaddudm 15, 15, 10
- vaddudm 15, 15, 11
- vmulouw 12, 7, 2
- vmulouw 13, 8, 1
- vaddudm 15, 15, 12
- vaddudm 15, 15, 13 # x1
- vmulouw 16, 4, 28
- vmulouw 10, 5, 27
- vmulouw 11, 6, 26
- vaddudm 16, 16, 10
- vaddudm 16, 16, 11
- vmulouw 12, 7, 3
- vmulouw 13, 8, 2
- vaddudm 16, 16, 12
- vaddudm 16, 16, 13 # x2
- vmulouw 17, 4, 29
- vmulouw 10, 5, 28
- vmulouw 11, 6, 27
- vaddudm 17, 17, 10
- vaddudm 17, 17, 11
- vmulouw 12, 7, 26
- vmulouw 13, 8, 3
- vaddudm 17, 17, 12
- vaddudm 17, 17, 13 # x3
- vmulouw 18, 4, 30
- vmulouw 10, 5, 29
- vmulouw 11, 6, 28
- vaddudm 18, 18, 10
- vaddudm 18, 18, 11
- vmulouw 12, 7, 27
- vmulouw 13, 8, 26
- vaddudm 18, 18, 12
- vaddudm 18, 18, 13 # x4
-.endm
-
-.macro mul_even
- vmuleuw 9, 4, 26
- vmuleuw 10, 5, 3
- vmuleuw 11, 6, 2
- vmuleuw 12, 7, 1
- vmuleuw 13, 8, 0
- vaddudm 14, 14, 9
- vaddudm 14, 14, 10
- vaddudm 14, 14, 11
- vaddudm 14, 14, 12
- vaddudm 14, 14, 13 # x0
-
- vmuleuw 9, 4, 27
- vmuleuw 10, 5, 26
- vmuleuw 11, 6, 3
- vmuleuw 12, 7, 2
- vmuleuw 13, 8, 1
- vaddudm 15, 15, 9
- vaddudm 15, 15, 10
- vaddudm 15, 15, 11
- vaddudm 15, 15, 12
- vaddudm 15, 15, 13 # x1
-
- vmuleuw 9, 4, 28
- vmuleuw 10, 5, 27
- vmuleuw 11, 6, 26
- vmuleuw 12, 7, 3
- vmuleuw 13, 8, 2
- vaddudm 16, 16, 9
- vaddudm 16, 16, 10
- vaddudm 16, 16, 11
- vaddudm 16, 16, 12
- vaddudm 16, 16, 13 # x2
-
- vmuleuw 9, 4, 29
- vmuleuw 10, 5, 28
- vmuleuw 11, 6, 27
- vmuleuw 12, 7, 26
- vmuleuw 13, 8, 3
- vaddudm 17, 17, 9
- vaddudm 17, 17, 10
- vaddudm 17, 17, 11
- vaddudm 17, 17, 12
- vaddudm 17, 17, 13 # x3
-
- vmuleuw 9, 4, 30
- vmuleuw 10, 5, 29
- vmuleuw 11, 6, 28
- vmuleuw 12, 7, 27
- vmuleuw 13, 8, 26
- vaddudm 18, 18, 9
- vaddudm 18, 18, 10
- vaddudm 18, 18, 11
- vaddudm 18, 18, 12
- vaddudm 18, 18, 13 # x4
-.endm
-
-#
-# poly1305_setup_r
-#
-# setup r^4, r^3, r^2, r vectors
-# [r, r^3, r^2, r^4]
-# vs0 = [r0,...]
-# vs1 = [r1,...]
-# vs2 = [r2,...]
-# vs3 = [r3,...]
-# vs4 = [r4,...]
-# vs5 = [r4*5,...]
-# vs6 = [r3*5,...]
-# vs7 = [r2*5,...]
-# vs8 = [r1*5,...]
-#
-# r0, r4*5, r3*5, r2*5, r1*5;
-# r1, r0, r4*5, r3*5, r2*5;
-# r2, r1, r0, r4*5, r3*5;
-# r3, r2, r1, r0, r4*5;
-# r4, r3, r2, r1, r0 ;
-#
-.macro poly1305_setup_r
-
- # save r
- xxlor 26, 58, 58
- xxlor 27, 59, 59
- xxlor 28, 60, 60
- xxlor 29, 61, 61
- xxlor 30, 62, 62
-
- xxlxor 31, 31, 31
-
-# [r, r^3, r^2, r^4]
- # compute r^2
- vmr 4, 26
- vmr 5, 27
- vmr 6, 28
- vmr 7, 29
- vmr 8, 30
- bl do_mul # r^2 r^1
- xxpermdi 58, 58, 36, 0x3 # r0
- xxpermdi 59, 59, 37, 0x3 # r1
- xxpermdi 60, 60, 38, 0x3 # r2
- xxpermdi 61, 61, 39, 0x3 # r3
- xxpermdi 62, 62, 40, 0x3 # r4
- xxpermdi 36, 36, 36, 0x3
- xxpermdi 37, 37, 37, 0x3
- xxpermdi 38, 38, 38, 0x3
- xxpermdi 39, 39, 39, 0x3
- xxpermdi 40, 40, 40, 0x3
- vspltisb 13, 2
- vsld 9, 27, 13
- vsld 10, 28, 13
- vsld 11, 29, 13
- vsld 12, 30, 13
- vaddudm 0, 9, 27
- vaddudm 1, 10, 28
- vaddudm 2, 11, 29
- vaddudm 3, 12, 30
-
- bl do_mul # r^4 r^3
- vmrgow 26, 26, 4
- vmrgow 27, 27, 5
- vmrgow 28, 28, 6
- vmrgow 29, 29, 7
- vmrgow 30, 30, 8
- vspltisb 13, 2
- vsld 9, 27, 13
- vsld 10, 28, 13
- vsld 11, 29, 13
- vsld 12, 30, 13
- vaddudm 0, 9, 27
- vaddudm 1, 10, 28
- vaddudm 2, 11, 29
- vaddudm 3, 12, 30
-
- # r^2 r^4
- xxlor 0, 58, 58
- xxlor 1, 59, 59
- xxlor 2, 60, 60
- xxlor 3, 61, 61
- xxlor 4, 62, 62
- xxlor 5, 32, 32
- xxlor 6, 33, 33
- xxlor 7, 34, 34
- xxlor 8, 35, 35
-
- vspltw 9, 26, 3
- vspltw 10, 26, 2
- vmrgow 26, 10, 9
- vspltw 9, 27, 3
- vspltw 10, 27, 2
- vmrgow 27, 10, 9
- vspltw 9, 28, 3
- vspltw 10, 28, 2
- vmrgow 28, 10, 9
- vspltw 9, 29, 3
- vspltw 10, 29, 2
- vmrgow 29, 10, 9
- vspltw 9, 30, 3
- vspltw 10, 30, 2
- vmrgow 30, 10, 9
-
- vsld 9, 27, 13
- vsld 10, 28, 13
- vsld 11, 29, 13
- vsld 12, 30, 13
- vaddudm 0, 9, 27
- vaddudm 1, 10, 28
- vaddudm 2, 11, 29
- vaddudm 3, 12, 30
-.endm
-
-SYM_FUNC_START_LOCAL(do_mul)
- mul_odd
-
- # do reduction ( h %= p )
- # carry reduction
- vspltisb 9, 2
- vsrd 10, 14, 31
- vsrd 11, 17, 31
- vand 7, 17, 25
- vand 4, 14, 25
- vaddudm 18, 18, 11
- vsrd 12, 18, 31
- vaddudm 15, 15, 10
-
- vsrd 11, 15, 31
- vand 8, 18, 25
- vand 5, 15, 25
- vaddudm 4, 4, 12
- vsld 10, 12, 9
- vaddudm 6, 16, 11
-
- vsrd 13, 6, 31
- vand 6, 6, 25
- vaddudm 4, 4, 10
- vsrd 10, 4, 31
- vaddudm 7, 7, 13
-
- vsrd 11, 7, 31
- vand 7, 7, 25
- vand 4, 4, 25
- vaddudm 5, 5, 10
- vaddudm 8, 8, 11
- blr
-SYM_FUNC_END(do_mul)
-
-#
-# init key
-#
-.macro do_poly1305_init
- addis 10, 2, rmask@toc@ha
- addi 10, 10, rmask@toc@l
-
- ld 11, 0(10)
- ld 12, 8(10)
-
- li 14, 16
- li 15, 32
- addis 10, 2, cnum@toc@ha
- addi 10, 10, cnum@toc@l
- lvx 25, 0, 10 # v25 - mask
- lvx 31, 14, 10 # v31 = 1a
- lvx 19, 15, 10 # v19 = 1 << 24
- lxv 24, 48(10) # vs24
- lxv 25, 64(10) # vs25
-
- # initialize
- # load key from r3 to vectors
- ld 9, 24(3)
- ld 10, 32(3)
- and. 9, 9, 11
- and. 10, 10, 12
-
- # break 26 bits
- extrdi 14, 9, 26, 38
- extrdi 15, 9, 26, 12
- extrdi 16, 9, 12, 0
- mtvsrdd 58, 0, 14
- insrdi 16, 10, 14, 38
- mtvsrdd 59, 0, 15
- extrdi 17, 10, 26, 24
- mtvsrdd 60, 0, 16
- extrdi 18, 10, 24, 0
- mtvsrdd 61, 0, 17
- mtvsrdd 62, 0, 18
-
- # r1 = r1 * 5, r2 = r2 * 5, r3 = r3 * 5, r4 = r4 * 5
- li 9, 5
- mtvsrdd 36, 0, 9
- vmulouw 0, 27, 4 # v0 = rr0
- vmulouw 1, 28, 4 # v1 = rr1
- vmulouw 2, 29, 4 # v2 = rr2
- vmulouw 3, 30, 4 # v3 = rr3
-.endm
-
-#
-# poly1305_p10le_4blocks(uint8_t *k, const uint8_t *m, uint32_t mlen)
-# k = 32 bytes key
-# r3 = k (r, s)
-# r4 = m
-# r5 = mlen
-#
-SYM_FUNC_START(poly1305_p10le_4blocks)
-.align 5
- cmpdi 5, 64
- blt Out_no_poly1305
-
- SAVE_REGS
-
- do_poly1305_init
-
- li 21, 0 # counter to message
-
- poly1305_setup_r
-
- # load previous H state
- # break/convert r6 to 26 bits
- ld 9, 0(3)
- ld 10, 8(3)
- ld 19, 16(3)
- sldi 19, 19, 24
- mtvsrdd 41, 0, 19
- extrdi 14, 9, 26, 38
- extrdi 15, 9, 26, 12
- extrdi 16, 9, 12, 0
- mtvsrdd 36, 0, 14
- insrdi 16, 10, 14, 38
- mtvsrdd 37, 0, 15
- extrdi 17, 10, 26, 24
- mtvsrdd 38, 0, 16
- extrdi 18, 10, 24, 0
- mtvsrdd 39, 0, 17
- mtvsrdd 40, 0, 18
- vor 8, 8, 9
-
- # input m1 m2
- add 20, 4, 21
- xxlor 49, 24, 24
- xxlor 50, 25, 25
- lxvw4x 43, 0, 20
- addi 17, 20, 16
- lxvw4x 44, 0, 17
- vperm 14, 11, 12, 17
- vperm 15, 11, 12, 18
- vand 9, 14, 25 # a0
- vsrd 10, 14, 31 # >> 26
- vsrd 11, 10, 31 # 12 bits left
- vand 10, 10, 25 # a1
- vspltisb 13, 12
- vand 16, 15, 25
- vsld 12, 16, 13
- vor 11, 11, 12
- vand 11, 11, 25 # a2
- vspltisb 13, 14
- vsrd 12, 15, 13 # >> 14
- vsrd 13, 12, 31 # >> 26, a4
- vand 12, 12, 25 # a3
-
- vaddudm 20, 4, 9
- vaddudm 21, 5, 10
- vaddudm 22, 6, 11
- vaddudm 23, 7, 12
- vaddudm 24, 8, 13
-
- # m3 m4
- addi 17, 17, 16
- lxvw4x 43, 0, 17
- addi 17, 17, 16
- lxvw4x 44, 0, 17
- vperm 14, 11, 12, 17
- vperm 15, 11, 12, 18
- vand 9, 14, 25 # a0
- vsrd 10, 14, 31 # >> 26
- vsrd 11, 10, 31 # 12 bits left
- vand 10, 10, 25 # a1
- vspltisb 13, 12
- vand 16, 15, 25
- vsld 12, 16, 13
- vspltisb 13, 14
- vor 11, 11, 12
- vand 11, 11, 25 # a2
- vsrd 12, 15, 13 # >> 14
- vsrd 13, 12, 31 # >> 26, a4
- vand 12, 12, 25 # a3
-
- # Smash 4 message blocks into 5 vectors of [m4, m2, m3, m1]
- vmrgow 4, 9, 20
- vmrgow 5, 10, 21
- vmrgow 6, 11, 22
- vmrgow 7, 12, 23
- vmrgow 8, 13, 24
- vaddudm 8, 8, 19
-
- addi 5, 5, -64 # len -= 64
- addi 21, 21, 64 # offset += 64
-
- li 9, 64
- divdu 31, 5, 9
-
- cmpdi 31, 0
- ble Skip_block_loop
-
- mtctr 31
-
-# h4 = m1 * r⁴ + m2 * r³ + m3 * r² + m4 * r
-# Rewrite the polynomial sum of products as follows,
-# h1 = (h0 + m1) * r^2, h2 = (h0 + m2) * r^2
-# h3 = (h1 + m3) * r^2, h4 = (h2 + m4) * r^2 --> (h0 + m1) * r^4 + m3 * r^2, (h0 + m2) * r^4 + m4 * r^2
-# .... Repeat
-# h5 = (h3 + m5) * r^2, h6 = (h4 + m6) * r^2 -->
-# h7 = (h5 + m7) * r^2, h8 = (h6 + m8) * r^1 --> m5 * r^4 + m6 * r^3 + m7 * r^2 + m8 * r
-#
-loop_4blocks:
-
- # Multiply odd words and even words
- mul_odd
- mul_even
- # carry reduction
- vspltisb 9, 2
- vsrd 10, 14, 31
- vsrd 11, 17, 31
- vand 7, 17, 25
- vand 4, 14, 25
- vaddudm 18, 18, 11
- vsrd 12, 18, 31
- vaddudm 15, 15, 10
-
- vsrd 11, 15, 31
- vand 8, 18, 25
- vand 5, 15, 25
- vaddudm 4, 4, 12
- vsld 10, 12, 9
- vaddudm 6, 16, 11
-
- vsrd 13, 6, 31
- vand 6, 6, 25
- vaddudm 4, 4, 10
- vsrd 10, 4, 31
- vaddudm 7, 7, 13
-
- vsrd 11, 7, 31
- vand 7, 7, 25
- vand 4, 4, 25
- vaddudm 5, 5, 10
- vaddudm 8, 8, 11
-
- # input m1 m2 m3 m4
- add 20, 4, 21
- xxlor 49, 24, 24
- xxlor 50, 25, 25
- lxvw4x 43, 0, 20
- addi 17, 20, 16
- lxvw4x 44, 0, 17
- vperm 14, 11, 12, 17
- vperm 15, 11, 12, 18
- addi 17, 17, 16
- lxvw4x 43, 0, 17
- addi 17, 17, 16
- lxvw4x 44, 0, 17
- vperm 17, 11, 12, 17
- vperm 18, 11, 12, 18
-
- vand 20, 14, 25 # a0
- vand 9, 17, 25 # a0
- vsrd 21, 14, 31 # >> 26
- vsrd 22, 21, 31 # 12 bits left
- vsrd 10, 17, 31 # >> 26
- vsrd 11, 10, 31 # 12 bits left
-
- vand 21, 21, 25 # a1
- vand 10, 10, 25 # a1
-
- vspltisb 13, 12
- vand 16, 15, 25
- vsld 23, 16, 13
- vor 22, 22, 23
- vand 22, 22, 25 # a2
- vand 16, 18, 25
- vsld 12, 16, 13
- vor 11, 11, 12
- vand 11, 11, 25 # a2
- vspltisb 13, 14
- vsrd 23, 15, 13 # >> 14
- vsrd 24, 23, 31 # >> 26, a4
- vand 23, 23, 25 # a3
- vsrd 12, 18, 13 # >> 14
- vsrd 13, 12, 31 # >> 26, a4
- vand 12, 12, 25 # a3
-
- vaddudm 4, 4, 20
- vaddudm 5, 5, 21
- vaddudm 6, 6, 22
- vaddudm 7, 7, 23
- vaddudm 8, 8, 24
-
- # Smash 4 message blocks into 5 vectors of [m4, m2, m3, m1]
- vmrgow 4, 9, 4
- vmrgow 5, 10, 5
- vmrgow 6, 11, 6
- vmrgow 7, 12, 7
- vmrgow 8, 13, 8
- vaddudm 8, 8, 19
-
- addi 5, 5, -64 # len -= 64
- addi 21, 21, 64 # offset += 64
-
- bdnz loop_4blocks
-
-Skip_block_loop:
- xxlor 58, 0, 0
- xxlor 59, 1, 1
- xxlor 60, 2, 2
- xxlor 61, 3, 3
- xxlor 62, 4, 4
- xxlor 32, 5, 5
- xxlor 33, 6, 6
- xxlor 34, 7, 7
- xxlor 35, 8, 8
-
- # Multiply odd words and even words
- mul_odd
- mul_even
-
- # Sum the products.
- xxpermdi 41, 31, 46, 0
- xxpermdi 42, 31, 47, 0
- vaddudm 4, 14, 9
- xxpermdi 36, 31, 36, 3
- vaddudm 5, 15, 10
- xxpermdi 37, 31, 37, 3
- xxpermdi 43, 31, 48, 0
- vaddudm 6, 16, 11
- xxpermdi 38, 31, 38, 3
- xxpermdi 44, 31, 49, 0
- vaddudm 7, 17, 12
- xxpermdi 39, 31, 39, 3
- xxpermdi 45, 31, 50, 0
- vaddudm 8, 18, 13
- xxpermdi 40, 31, 40, 3
-
- # carry reduction
- vspltisb 9, 2
- vsrd 10, 4, 31
- vsrd 11, 7, 31
- vand 7, 7, 25
- vand 4, 4, 25
- vaddudm 8, 8, 11
- vsrd 12, 8, 31
- vaddudm 5, 5, 10
-
- vsrd 11, 5, 31
- vand 8, 8, 25
- vand 5, 5, 25
- vaddudm 4, 4, 12
- vsld 10, 12, 9
- vaddudm 6, 6, 11
-
- vsrd 13, 6, 31
- vand 6, 6, 25
- vaddudm 4, 4, 10
- vsrd 10, 4, 31
- vaddudm 7, 7, 13
-
- vsrd 11, 7, 31
- vand 7, 7, 25
- vand 4, 4, 25
- vaddudm 5, 5, 10
- vsrd 10, 5, 31
- vand 5, 5, 25
- vaddudm 6, 6, 10
- vaddudm 8, 8, 11
-
- b do_final_update
-
-do_final_update:
- # combine 26 bit limbs
- # v4, v5, v6, v7 and v8 are 26 bit vectors
- vsld 5, 5, 31
- vor 20, 4, 5
- vspltisb 11, 12
- vsrd 12, 6, 11
- vsld 6, 6, 31
- vsld 6, 6, 31
- vor 20, 20, 6
- vspltisb 11, 14
- vsld 7, 7, 11
- vor 21, 7, 12
- mfvsrld 16, 40 # save last 2 bytes
- vsld 8, 8, 11
- vsld 8, 8, 31
- vor 21, 21, 8
- mfvsrld 17, 52
- mfvsrld 19, 53
- srdi 16, 16, 24
-
- std 17, 0(3)
- std 19, 8(3)
- stw 16, 16(3)
-
-Out_loop:
- li 3, 0
-
- RESTORE_REGS
-
- blr
-
-Out_no_poly1305:
- li 3, 0
- blr
-SYM_FUNC_END(poly1305_p10le_4blocks)
-
-#
-# =======================================================================
-# The following functions implement poly1305 using 64 x 64 bit multiplication.
-#
-SYM_FUNC_START_LOCAL(Poly1305_init_64)
- # mask 0x0FFFFFFC0FFFFFFF for r0
- # mask 0x0FFFFFFC0FFFFFFC for r1
- addis 10, 2, rmask@toc@ha
- addi 10, 10, rmask@toc@l
- ld 11, 0(10)
- ld 12, 8(10)
-
- # initialize
- # load key from r3
- ld 9, 24(3)
- ld 10, 32(3)
- and. 9, 9, 11 # clamp mask r0
- and. 10, 10, 12 # clamp mask r1
-
- srdi 21, 10, 2
- add 19, 21, 10 # s1: r19 = (r1 >> 2) * 5
-
- # setup r and s
- li 25, 0
- mtvsrdd 32+0, 9, 19 # r0, s1
- mtvsrdd 32+1, 10, 9 # r1, r0
- mtvsrdd 32+2, 19, 25 # s1
- mtvsrdd 32+3, 9, 25 # r0
-
- blr
-SYM_FUNC_END(Poly1305_init_64)
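
A C sketch of what Poly1305_init_64 computes (RFC 7539 key clamping plus
the s1 precomputation); the function and parameter names are ours, and r
is taken from byte offset 24 of the state block as the loads above imply:

    #include <stdint.h>
    #include <string.h>

    static void poly1305_clamp_r(const uint8_t r_bytes[16],
                                 uint64_t *r0, uint64_t *r1, uint64_t *s1)
    {
            memcpy(r0, r_bytes, 8);         /* little-endian loads */
            memcpy(r1, r_bytes + 8, 8);
            *r0 &= 0x0FFFFFFC0FFFFFFFULL;   /* clamp r0 */
            *r1 &= 0x0FFFFFFC0FFFFFFCULL;   /* clamp r1 */
            *s1 = *r1 + (*r1 >> 2);         /* = (r1 >> 2) * 5; r1's low two bits are clear */
    }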
-
-# Poly1305_mult
-# v6 = (h0, h1), v8 = h2
-# v0 = (r0, s1), v1 = (r1, r0), v2 = s1, v3 = r0
-#
-# Output: v7, v10, v11
-#
-SYM_FUNC_START_LOCAL(Poly1305_mult)
- #
- # d0 = h0 * r0 + h1 * s1
- vmsumudm 7, 6, 0, 9 # h0 * r0, h1 * s1
-
- # d1 = h0 * r1 + h1 * r0 + h2 * s1
- vmsumudm 11, 6, 1, 9 # h0 * r1, h1 * r0
- vmsumudm 10, 8, 2, 11 # d1 += h2 * s1
-
- # d2 = h2 * r0
- vmsumudm 11, 8, 3, 9 # d2 = h2 * r0
- blr
-SYM_FUNC_END(Poly1305_mult)
-
-#
-# carry reduction
-# h %= p
-#
-# Input: v7, v10, v11
-# Output: r27, r28, r29
-#
-SYM_FUNC_START_LOCAL(Carry_reduction)
- mfvsrld 27, 32+7
- mfvsrld 28, 32+10
- mfvsrld 29, 32+11
- mfvsrd 20, 32+7 # h0.h
- mfvsrd 21, 32+10 # h1.h
-
- addc 28, 28, 20
- adde 29, 29, 21
- srdi 22, 29, 0x2
- sldi 23, 22, 0x2
- add 23, 23, 22 # (h2 >> 2) * 5
- addc 27, 27, 23 # h0
- addze 28, 28 # h1
- andi. 29, 29, 0x3 # h2
- blr
-SYM_FUNC_END(Carry_reduction)
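
The same multiply-and-reduce step in scalar C, for reference: unsigned
__int128 stands in for the vmsumudm accumulations, and p = 2^130 - 5.
This is a sketch mirroring Poly1305_mult plus Carry_reduction, not the
code the kernel builds:

    #include <stdint.h>

    typedef unsigned __int128 u128;

    static void poly1305_mul_reduce(uint64_t h[3],
                                    uint64_t r0, uint64_t r1, uint64_t s1)
    {
            u128 d0 = (u128)h[0] * r0 + (u128)h[1] * s1;
            u128 d1 = (u128)h[0] * r1 + (u128)h[1] * r0 + (u128)h[2] * s1;
            u128 d2 = (u128)h[2] * r0;      /* h2 is tiny, so d2 fits 64 bits */
            uint64_t c;

            /* propagate the high halves upward */
            d1 += (uint64_t)(d0 >> 64);
            d2 += (uint64_t)(d1 >> 64);

            /* h %= p: bits at 2^130 and above fold back in multiplied by 5 */
            c = ((uint64_t)d2 >> 2) * 5;
            h[0] = (uint64_t)d0;
            h[1] = (uint64_t)d1;
            h[2] = (uint64_t)d2 & 3;

            d0 = (u128)h[0] + c;
            h[0] = (uint64_t)d0;
            d1 = (u128)h[1] + (uint64_t)(d0 >> 64);
            h[1] = (uint64_t)d1;
            h[2] += (uint64_t)(d1 >> 64);
    }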
-
-#
-# poly1305 multiplication
-# h *= r, h %= p
-# d0 = h0 * r0 + h1 * s1
-# d1 = h0 * r1 + h1 * r0 + h2 * s1
-# d2 = h2 * r0
-#
-#
-# unsigned int poly1305_64s(unsigned char *state, const byte *src, size_t len, highbit)
-# - no highbit if final leftover block (highbit = 0)
-#
-SYM_FUNC_START(poly1305_64s)
- cmpdi 5, 0
- ble Out_no_poly1305_64
-
- mflr 0
- std 0, 16(1)
- stdu 1,-400(1)
-
- SAVE_GPR 14, 112, 1
- SAVE_GPR 15, 120, 1
- SAVE_GPR 16, 128, 1
- SAVE_GPR 17, 136, 1
- SAVE_GPR 18, 144, 1
- SAVE_GPR 19, 152, 1
- SAVE_GPR 20, 160, 1
- SAVE_GPR 21, 168, 1
- SAVE_GPR 22, 176, 1
- SAVE_GPR 23, 184, 1
- SAVE_GPR 24, 192, 1
- SAVE_GPR 25, 200, 1
- SAVE_GPR 26, 208, 1
- SAVE_GPR 27, 216, 1
- SAVE_GPR 28, 224, 1
- SAVE_GPR 29, 232, 1
- SAVE_GPR 30, 240, 1
- SAVE_GPR 31, 248, 1
-
- # Init poly1305
- bl Poly1305_init_64
-
- li 25, 0 # offset to inp and outp
-
- add 11, 25, 4
-
- # load h
- # h0, h1, h2
- ld 27, 0(3)
- ld 28, 8(3)
- lwz 29, 16(3)
-
- li 30, 16
- divdu 31, 5, 30
-
- mtctr 31
-
- mr 24, 6 # highbit
-
-Loop_block_64:
- vxor 9, 9, 9
-
- ld 20, 0(11)
- ld 21, 8(11)
- addi 11, 11, 16
-
- addc 27, 27, 20
- adde 28, 28, 21
- adde 29, 29, 24
-
- li 22, 0
- mtvsrdd 32+6, 27, 28 # h0, h1
- mtvsrdd 32+8, 29, 22 # h2
-
- bl Poly1305_mult
-
- bl Carry_reduction
-
- bdnz Loop_block_64
-
- std 27, 0(3)
- std 28, 8(3)
- stw 29, 16(3)
-
- li 3, 0
-
- RESTORE_GPR 14, 112, 1
- RESTORE_GPR 15, 120, 1
- RESTORE_GPR 16, 128, 1
- RESTORE_GPR 17, 136, 1
- RESTORE_GPR 18, 144, 1
- RESTORE_GPR 19, 152, 1
- RESTORE_GPR 20, 160, 1
- RESTORE_GPR 21, 168, 1
- RESTORE_GPR 22, 176, 1
- RESTORE_GPR 23, 184, 1
- RESTORE_GPR 24, 192, 1
- RESTORE_GPR 25, 200, 1
- RESTORE_GPR 26, 208, 1
- RESTORE_GPR 27, 216, 1
- RESTORE_GPR 28, 224, 1
- RESTORE_GPR 29, 232, 1
- RESTORE_GPR 30, 240, 1
- RESTORE_GPR 31, 248, 1
-
- addi 1, 1, 400
- ld 0, 16(1)
- mtlr 0
-
- blr
-
-Out_no_poly1305_64:
- li 3, 0
- blr
-SYM_FUNC_END(poly1305_64s)
-
-#
-# Input: r3 = h, r4 = s, r5 = mac
-# mac = h + s
-#
-SYM_FUNC_START(poly1305_emit_64)
- ld 10, 0(3)
- ld 11, 8(3)
- ld 12, 16(3)
-
- # compare modulus
- # h + 5 + (-p)
- mr 6, 10
- mr 7, 11
- mr 8, 12
- addic. 6, 6, 5
- addze 7, 7
- addze 8, 8
- srdi 9, 8, 2 # overflow?
- cmpdi 9, 0
- beq Skip_h64
- mr 10, 6
- mr 11, 7
- mr 12, 8
-
-Skip_h64:
- ld 6, 0(4)
- ld 7, 8(4)
- addc 10, 10, 6
- adde 11, 11, 7
- addze 12, 12
-
- std 10, 0(5)
- std 11, 8(5)
- blr
-SYM_FUNC_END(poly1305_emit_64)
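
And the final mac = (h mod p) + s step in the same scalar style (a sketch;
names are ours).  Adding 5 and checking for a carry into bit 130 is how the
addic./addze/srdi sequence above detects h >= p:

    #include <stdint.h>
    #include <string.h>

    typedef unsigned __int128 u128;

    static void poly1305_emit(const uint64_t h[3], const uint64_t s[2],
                              uint8_t mac[16])
    {
            uint64_t h0 = h[0], h1 = h[1];
            u128 t = (u128)h0 + 5;          /* g = h + 5 */
            uint64_t g0 = (uint64_t)t;

            t = (u128)h1 + (uint64_t)(t >> 64);
            if ((h[2] + (uint64_t)(t >> 64)) >> 2) {
                    h0 = g0;                /* g overflowed 2^130: h >= p */
                    h1 = (uint64_t)t;
            }

            t = (u128)h0 + s[0];            /* mac = h + s, mod 2^128 */
            h1 = h1 + s[1] + (uint64_t)(t >> 64);
            h0 = (uint64_t)t;
            memcpy(mac, &h0, 8);            /* little-endian stores */
            memcpy(mac + 8, &h1, 8);
    }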
-
-SYM_DATA_START_LOCAL(RMASK)
-.align 5
-rmask:
-.byte 0xff, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f
-cnum:
-.long 0x03ffffff, 0x00000000, 0x03ffffff, 0x00000000
-.long 0x1a, 0x00, 0x1a, 0x00
-.long 0x01000000, 0x01000000, 0x01000000, 0x01000000
-.long 0x00010203, 0x04050607, 0x10111213, 0x14151617
-.long 0x08090a0b, 0x0c0d0e0f, 0x18191a1b, 0x1c1d1e1f
-SYM_DATA_END(RMASK)
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index 9170892a8557..04c88e173ce1 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -7,16 +7,13 @@
* Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
*/
+#include <asm/switch_to.h>
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
-#include <asm/switch_to.h>
-#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/preempt.h>
+#include <linux/module.h>
/*
* MAX_BYTES defines the number of bytes that are allowed to be processed
@@ -30,7 +27,7 @@
*/
#define MAX_BYTES 2048
-extern void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks);
+asmlinkage void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks);
static void spe_begin(void)
{
@@ -46,126 +43,45 @@ static void spe_end(void)
preempt_enable();
}
-static inline void ppc_sha1_clear_context(struct sha1_state *sctx)
+static void ppc_spe_sha1_block(struct sha1_state *sctx, const u8 *src,
+ int blocks)
{
- int count = sizeof(struct sha1_state) >> 2;
- u32 *ptr = (u32 *)sctx;
-
- /* make sure we can clear the fast way */
- BUILD_BUG_ON(sizeof(struct sha1_state) % 4);
- do { *ptr++ = 0; } while (--count);
-}
-
-static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->count & 0x3f;
- const unsigned int avail = 64 - offset;
- unsigned int bytes;
- const u8 *src = data;
-
- if (avail > len) {
- sctx->count += len;
- memcpy((char *)sctx->buffer + offset, src, len);
- return 0;
- }
-
- sctx->count += len;
-
- if (offset) {
- memcpy((char *)sctx->buffer + offset, src, avail);
+ do {
+ int unit = min(blocks, MAX_BYTES / SHA1_BLOCK_SIZE);
spe_begin();
- ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1);
+ ppc_spe_sha1_transform(sctx->state, src, unit);
spe_end();
- len -= avail;
- src += avail;
- }
-
- while (len > 63) {
- bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
- bytes = bytes & ~0x3f;
-
- spe_begin();
- ppc_spe_sha1_transform(sctx->state, src, bytes >> 6);
- spe_end();
-
- src += bytes;
- len -= bytes;
- }
-
- memcpy((char *)sctx->buffer, src, len);
- return 0;
-}
-
-static int ppc_spe_sha1_final(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->count & 0x3f;
- char *p = (char *)sctx->buffer + offset;
- int padlen;
- __be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56);
- __be32 *dst = (__be32 *)out;
-
- padlen = 55 - offset;
- *p++ = 0x80;
-
- spe_begin();
-
- if (padlen < 0) {
- memset(p, 0x00, padlen + sizeof (u64));
- ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
- p = (char *)sctx->buffer;
- padlen = 56;
- }
-
- memset(p, 0, padlen);
- *pbits = cpu_to_be64(sctx->count << 3);
- ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
-
- spe_end();
-
- dst[0] = cpu_to_be32(sctx->state[0]);
- dst[1] = cpu_to_be32(sctx->state[1]);
- dst[2] = cpu_to_be32(sctx->state[2]);
- dst[3] = cpu_to_be32(sctx->state[3]);
- dst[4] = cpu_to_be32(sctx->state[4]);
-
- ppc_sha1_clear_context(sctx);
- return 0;
+ src += unit * SHA1_BLOCK_SIZE;
+ blocks -= unit;
+ } while (blocks);
}
-static int ppc_spe_sha1_export(struct shash_desc *desc, void *out)
+static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
+ return sha1_base_do_update_blocks(desc, data, len, ppc_spe_sha1_block);
}
-static int ppc_spe_sha1_import(struct shash_desc *desc, const void *in)
+static int ppc_spe_sha1_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ sha1_base_do_finup(desc, src, len, ppc_spe_sha1_block);
+ return sha1_base_finish(desc, out);
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = ppc_spe_sha1_update,
- .final = ppc_spe_sha1_final,
- .export = ppc_spe_sha1_export,
- .import = ppc_spe_sha1_import,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = ppc_spe_sha1_finup,
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ppc-spe",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
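
The new ppc_spe_sha1_block() helper above bounds each spe_begin()/spe_end()
window to min(blocks, MAX_BYTES / SHA1_BLOCK_SIZE) = 2048 / 64 = 32 blocks,
so the preemption-latency cap that the old open-coded update loop enforced
is preserved.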
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index f283bbd3f121..4593946aa9b3 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -13,107 +13,46 @@
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
-
-void powerpc_sha_transform(u32 *state, const u8 *src);
-
-static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
-
- if ((partial + len) > 63) {
-
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data, done + 64);
- src = sctx->buffer;
- }
-
- do {
- powerpc_sha_transform(sctx->state, src);
- done += 64;
- src = data + done;
- } while (done + 63 < len);
-
- partial = 0;
- }
- memcpy(sctx->buffer + partial, src, len - done);
-
- return 0;
-}
+#include <linux/kernel.h>
+#include <linux/module.h>
+asmlinkage void powerpc_sha_transform(u32 *state, const u8 *src);
-/* Add padding and return the message digest. */
-static int powerpc_sha1_final(struct shash_desc *desc, u8 *out)
+static void powerpc_sha_block(struct sha1_state *sctx, const u8 *data,
+ int blocks)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- u32 i, index, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 */
- index = sctx->count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- powerpc_sha1_update(desc, padding, padlen);
-
- /* Append length */
- powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof *sctx);
-
- return 0;
+ do {
+ powerpc_sha_transform(sctx->state, data);
+ data += 64;
+ } while (--blocks);
}
-static int powerpc_sha1_export(struct shash_desc *desc, void *out)
+static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
+ return sha1_base_do_update_blocks(desc, data, len, powerpc_sha_block);
}
-static int powerpc_sha1_import(struct shash_desc *desc, const void *in)
+/* Add padding and return the message digest. */
+static int powerpc_sha1_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ sha1_base_do_finup(desc, src, len, powerpc_sha_block);
+ return sha1_base_finish(desc, out);
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = powerpc_sha1_update,
- .final = powerpc_sha1_final,
- .export = powerpc_sha1_export,
- .import = powerpc_sha1_import,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = powerpc_sha1_finup,
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-powerpc",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha256-spe-asm.S b/arch/powerpc/crypto/sha256-spe-asm.S
deleted file mode 100644
index cd99d71dae34..000000000000
--- a/arch/powerpc/crypto/sha256-spe-asm.S
+++ /dev/null
@@ -1,318 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Fast SHA-256 implementation for SPE instruction set (PPC)
- *
- * This code makes use of the SPE SIMD instruction set as defined in
- * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf
- * Implementation is based on optimization guide notes from
- * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf
- *
- * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#define rHP r3 /* pointer to hash values in memory */
-#define rKP r24 /* pointer to round constants */
-#define rWP r4 /* pointer to input data */
-
-#define rH0 r5 /* 8 32 bit hash values in 8 registers */
-#define rH1 r6
-#define rH2 r7
-#define rH3 r8
-#define rH4 r9
-#define rH5 r10
-#define rH6 r11
-#define rH7 r12
-
-#define rW0 r14 /* 64 bit registers. 16 words in 8 registers */
-#define rW1 r15
-#define rW2 r16
-#define rW3 r17
-#define rW4 r18
-#define rW5 r19
-#define rW6 r20
-#define rW7 r21
-
-#define rT0 r22 /* 64 bit temporaries */
-#define rT1 r23
-#define rT2 r0 /* 32 bit temporaries */
-#define rT3 r25
-
-#define CMP_KN_LOOP
-#define CMP_KC_LOOP \
- cmpwi rT1,0;
-
-#define INITIALIZE \
- stwu r1,-128(r1); /* create stack frame */ \
- evstdw r14,8(r1); /* We must save non volatile */ \
- evstdw r15,16(r1); /* registers. Take the chance */ \
- evstdw r16,24(r1); /* and save the SPE part too */ \
- evstdw r17,32(r1); \
- evstdw r18,40(r1); \
- evstdw r19,48(r1); \
- evstdw r20,56(r1); \
- evstdw r21,64(r1); \
- evstdw r22,72(r1); \
- evstdw r23,80(r1); \
- stw r24,88(r1); /* save normal registers */ \
- stw r25,92(r1);
-
-
-#define FINALIZE \
- evldw r14,8(r1); /* restore SPE registers */ \
- evldw r15,16(r1); \
- evldw r16,24(r1); \
- evldw r17,32(r1); \
- evldw r18,40(r1); \
- evldw r19,48(r1); \
- evldw r20,56(r1); \
- evldw r21,64(r1); \
- evldw r22,72(r1); \
- evldw r23,80(r1); \
- lwz r24,88(r1); /* restore normal registers */ \
- lwz r25,92(r1); \
- xor r0,r0,r0; \
- stw r0,8(r1); /* Delete sensitive data */ \
- stw r0,16(r1); /* that we might have pushed */ \
- stw r0,24(r1); /* from other context that runs */ \
- stw r0,32(r1); /* the same code. Assume that */ \
- stw r0,40(r1); /* the lower part of the GPRs */ \
- stw r0,48(r1); /* was already overwritten on */ \
- stw r0,56(r1); /* the way down to here */ \
- stw r0,64(r1); \
- stw r0,72(r1); \
- stw r0,80(r1); \
- addi r1,r1,128; /* cleanup stack frame */
-
-#ifdef __BIG_ENDIAN__
-#define LOAD_DATA(reg, off) \
- lwz reg,off(rWP); /* load data */
-#define NEXT_BLOCK \
- addi rWP,rWP,64; /* increment per block */
-#else
-#define LOAD_DATA(reg, off) \
- lwbrx reg,0,rWP; /* load data */ \
- addi rWP,rWP,4; /* increment per word */
-#define NEXT_BLOCK /* nothing to do */
-#endif
-
-#define R_LOAD_W(a, b, c, d, e, f, g, h, w, off) \
- LOAD_DATA(w, off) /* 1: W */ \
- rotrwi rT0,e,6; /* 1: S1 = e rotr 6 */ \
- rotrwi rT1,e,11; /* 1: S1' = e rotr 11 */ \
- rotrwi rT2,e,25; /* 1: S1" = e rotr 25 */ \
- xor rT0,rT0,rT1; /* 1: S1 = S1 xor S1' */ \
- and rT3,e,f; /* 1: ch = e and f */ \
- xor rT0,rT0,rT2; /* 1: S1 = S1 xor S1" */ \
- andc rT1,g,e; /* 1: ch' = ~e and g */ \
- lwz rT2,off(rKP); /* 1: K */ \
- xor rT3,rT3,rT1; /* 1: ch = ch xor ch' */ \
- add h,h,rT0; /* 1: temp1 = h + S1 */ \
- add rT3,rT3,w; /* 1: temp1' = ch + w */ \
- rotrwi rT0,a,2; /* 1: S0 = a rotr 2 */ \
- add h,h,rT3; /* 1: temp1 = temp1 + temp1' */ \
- rotrwi rT1,a,13; /* 1: S0' = a rotr 13 */ \
- add h,h,rT2; /* 1: temp1 = temp1 + K */ \
- rotrwi rT3,a,22; /* 1: S0" = a rotr 22 */ \
- xor rT0,rT0,rT1; /* 1: S0 = S0 xor S0' */ \
- add d,d,h; /* 1: d = d + temp1 */ \
- xor rT3,rT0,rT3; /* 1: S0 = S0 xor S0" */ \
- evmergelo w,w,w; /* shift W */ \
- or rT2,a,b; /* 1: maj = a or b */ \
- and rT1,a,b; /* 1: maj' = a and b */ \
- and rT2,rT2,c; /* 1: maj = maj and c */ \
- LOAD_DATA(w, off+4) /* 2: W */ \
- or rT2,rT1,rT2; /* 1: maj = maj or maj' */ \
- rotrwi rT0,d,6; /* 2: S1 = e rotr 6 */ \
- add rT3,rT3,rT2; /* 1: temp2 = S0 + maj */ \
- rotrwi rT1,d,11; /* 2: S1' = e rotr 11 */ \
- add h,h,rT3; /* 1: h = temp1 + temp2 */ \
- rotrwi rT2,d,25; /* 2: S1" = e rotr 25 */ \
- xor rT0,rT0,rT1; /* 2: S1 = S1 xor S1' */ \
- and rT3,d,e; /* 2: ch = e and f */ \
- xor rT0,rT0,rT2; /* 2: S1 = S1 xor S1" */ \
- andc rT1,f,d; /* 2: ch' = ~e and g */ \
- lwz rT2,off+4(rKP); /* 2: K */ \
- xor rT3,rT3,rT1; /* 2: ch = ch xor ch' */ \
- add g,g,rT0; /* 2: temp1 = h + S1 */ \
- add rT3,rT3,w; /* 2: temp1' = ch + w */ \
- rotrwi rT0,h,2; /* 2: S0 = a rotr 2 */ \
- add g,g,rT3; /* 2: temp1 = temp1 + temp1' */ \
- rotrwi rT1,h,13; /* 2: S0' = a rotr 13 */ \
- add g,g,rT2; /* 2: temp1 = temp1 + K */ \
- rotrwi rT3,h,22; /* 2: S0" = a rotr 22 */ \
- xor rT0,rT0,rT1; /* 2: S0 = S0 xor S0' */ \
- or rT2,h,a; /* 2: maj = a or b */ \
- xor rT3,rT0,rT3; /* 2: S0 = S0 xor S0" */ \
- and rT1,h,a; /* 2: maj' = a and b */ \
- and rT2,rT2,b; /* 2: maj = maj and c */ \
- add c,c,g; /* 2: d = d + temp1 */ \
- or rT2,rT1,rT2; /* 2: maj = maj or maj' */ \
- add rT3,rT3,rT2; /* 2: temp2 = S0 + maj */ \
- add g,g,rT3 /* 2: h = temp1 + temp2 */
-
-#define R_CALC_W(a, b, c, d, e, f, g, h, w0, w1, w4, w5, w7, k, off) \
- rotrwi rT2,e,6; /* 1: S1 = e rotr 6 */ \
- evmergelohi rT0,w0,w1; /* w[-15] */ \
- rotrwi rT3,e,11; /* 1: S1' = e rotr 11 */ \
- evsrwiu rT1,rT0,3; /* s0 = w[-15] >> 3 */ \
- xor rT2,rT2,rT3; /* 1: S1 = S1 xor S1' */ \
- evrlwi rT0,rT0,25; /* s0' = w[-15] rotr 7 */ \
- rotrwi rT3,e,25; /* 1: S1' = e rotr 25 */ \
- evxor rT1,rT1,rT0; /* s0 = s0 xor s0' */ \
- xor rT2,rT2,rT3; /* 1: S1 = S1 xor S1' */ \
- evrlwi rT0,rT0,21; /* s0' = w[-15] rotr 18 */ \
- add h,h,rT2; /* 1: temp1 = h + S1 */ \
- evxor rT0,rT0,rT1; /* s0 = s0 xor s0' */ \
- and rT2,e,f; /* 1: ch = e and f */ \
- evaddw w0,w0,rT0; /* w = w[-16] + s0 */ \
- andc rT3,g,e; /* 1: ch' = ~e and g */ \
- evsrwiu rT0,w7,10; /* s1 = w[-2] >> 10 */ \
- xor rT2,rT2,rT3; /* 1: ch = ch xor ch' */ \
- evrlwi rT1,w7,15; /* s1' = w[-2] rotr 17 */ \
- add h,h,rT2; /* 1: temp1 = temp1 + ch */ \
- evxor rT0,rT0,rT1; /* s1 = s1 xor s1' */ \
- rotrwi rT2,a,2; /* 1: S0 = a rotr 2 */ \
- evrlwi rT1,w7,13; /* s1' = w[-2] rotr 19 */ \
- rotrwi rT3,a,13; /* 1: S0' = a rotr 13 */ \
- evxor rT0,rT0,rT1; /* s1 = s1 xor s1' */ \
- xor rT2,rT2,rT3; /* 1: S0 = S0 xor S0' */ \
- evldw rT1,off(rKP); /* k */ \
- rotrwi rT3,a,22; /* 1: S0' = a rotr 22 */ \
- evaddw w0,w0,rT0; /* w = w + s1 */ \
- xor rT2,rT2,rT3; /* 1: S0 = S0 xor S0' */ \
- evmergelohi rT0,w4,w5; /* w[-7] */ \
- and rT3,a,b; /* 1: maj = a and b */ \
- evaddw w0,w0,rT0; /* w = w + w[-7] */ \
- CMP_K##k##_LOOP \
- add rT2,rT2,rT3; /* 1: temp2 = S0 + maj */ \
- evaddw rT1,rT1,w0; /* wk = w + k */ \
- xor rT3,a,b; /* 1: maj = a xor b */ \
- evmergehi rT0,rT1,rT1; /* wk1/wk2 */ \
- and rT3,rT3,c; /* 1: maj = maj and c */ \
- add h,h,rT0; /* 1: temp1 = temp1 + wk */ \
- add rT2,rT2,rT3; /* 1: temp2 = temp2 + maj */ \
- add g,g,rT1; /* 2: temp1 = temp1 + wk */ \
- add d,d,h; /* 1: d = d + temp1 */ \
- rotrwi rT0,d,6; /* 2: S1 = e rotr 6 */ \
- add h,h,rT2; /* 1: h = temp1 + temp2 */ \
- rotrwi rT1,d,11; /* 2: S1' = e rotr 11 */ \
- rotrwi rT2,d,25; /* 2: S1" = e rotr 25 */ \
- xor rT0,rT0,rT1; /* 2: S1 = S1 xor S1' */ \
- and rT3,d,e; /* 2: ch = e and f */ \
- xor rT0,rT0,rT2; /* 2: S1 = S1 xor S1" */ \
- andc rT1,f,d; /* 2: ch' = ~e and g */ \
- add g,g,rT0; /* 2: temp1 = h + S1 */ \
- xor rT3,rT3,rT1; /* 2: ch = ch xor ch' */ \
- rotrwi rT0,h,2; /* 2: S0 = a rotr 2 */ \
- add g,g,rT3; /* 2: temp1 = temp1 + ch */ \
- rotrwi rT1,h,13; /* 2: S0' = a rotr 13 */ \
- rotrwi rT3,h,22; /* 2: S0" = a rotr 22 */ \
- xor rT0,rT0,rT1; /* 2: S0 = S0 xor S0' */ \
- or rT2,h,a; /* 2: maj = a or b */ \
- and rT1,h,a; /* 2: maj' = a and b */ \
- and rT2,rT2,b; /* 2: maj = maj and c */ \
- xor rT3,rT0,rT3; /* 2: S0 = S0 xor S0" */ \
- or rT2,rT1,rT2; /* 2: maj = maj or maj' */ \
- add c,c,g; /* 2: d = d + temp1 */ \
- add rT3,rT3,rT2; /* 2: temp2 = S0 + maj */ \
- add g,g,rT3 /* 2: h = temp1 + temp2 */
-
-_GLOBAL(ppc_spe_sha256_transform)
- INITIALIZE
-
- mtctr r5
- lwz rH0,0(rHP)
- lwz rH1,4(rHP)
- lwz rH2,8(rHP)
- lwz rH3,12(rHP)
- lwz rH4,16(rHP)
- lwz rH5,20(rHP)
- lwz rH6,24(rHP)
- lwz rH7,28(rHP)
-
-ppc_spe_sha256_main:
- lis rKP,PPC_SPE_SHA256_K@ha
- addi rKP,rKP,PPC_SPE_SHA256_K@l
-
- R_LOAD_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW0, 0)
- R_LOAD_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW1, 8)
- R_LOAD_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW2, 16)
- R_LOAD_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW3, 24)
- R_LOAD_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW4, 32)
- R_LOAD_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW5, 40)
- R_LOAD_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW6, 48)
- R_LOAD_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW7, 56)
-ppc_spe_sha256_16_rounds:
- addi rKP,rKP,64
- R_CALC_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7,
- rW0, rW1, rW4, rW5, rW7, N, 0)
- R_CALC_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5,
- rW1, rW2, rW5, rW6, rW0, N, 8)
- R_CALC_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3,
- rW2, rW3, rW6, rW7, rW1, N, 16)
- R_CALC_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1,
- rW3, rW4, rW7, rW0, rW2, N, 24)
- R_CALC_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7,
- rW4, rW5, rW0, rW1, rW3, N, 32)
- R_CALC_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5,
- rW5, rW6, rW1, rW2, rW4, N, 40)
- R_CALC_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3,
- rW6, rW7, rW2, rW3, rW5, N, 48)
- R_CALC_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1,
- rW7, rW0, rW3, rW4, rW6, C, 56)
- bt gt,ppc_spe_sha256_16_rounds
-
- lwz rW0,0(rHP)
- NEXT_BLOCK
- lwz rW1,4(rHP)
- lwz rW2,8(rHP)
- lwz rW3,12(rHP)
- lwz rW4,16(rHP)
- lwz rW5,20(rHP)
- lwz rW6,24(rHP)
- lwz rW7,28(rHP)
-
- add rH0,rH0,rW0
- stw rH0,0(rHP)
- add rH1,rH1,rW1
- stw rH1,4(rHP)
- add rH2,rH2,rW2
- stw rH2,8(rHP)
- add rH3,rH3,rW3
- stw rH3,12(rHP)
- add rH4,rH4,rW4
- stw rH4,16(rHP)
- add rH5,rH5,rW5
- stw rH5,20(rHP)
- add rH6,rH6,rW6
- stw rH6,24(rHP)
- add rH7,rH7,rW7
- stw rH7,28(rHP)
-
- bdnz ppc_spe_sha256_main
-
- FINALIZE
- blr
-
-.data
-.align 5
-PPC_SPE_SHA256_K:
- .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
deleted file mode 100644
index 2997d13236e0..000000000000
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ /dev/null
@@ -1,235 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Glue code for SHA-256 implementation for SPE instructions (PPC)
- *
- * Based on generic implementation. The assembler module takes care
- * about the SPE registers so it can run from interrupt context.
- *
- * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/byteorder.h>
-#include <asm/switch_to.h>
-#include <linux/hardirq.h>
-
-/*
- * MAX_BYTES defines the number of bytes that are allowed to be processed
- * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000
- * operations per 64 bytes. e500 cores can issue two arithmetic instructions
- * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
- * Thus 1KB of input data will need an estimated maximum of 18,000 cycles.
- * Headroom for cache misses included. Even with the low-end model clocked
- * at 667 MHz this equates to a critical time window of less than 27us.
- *
- */
-#define MAX_BYTES 1024
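
(Worked through: 1 KiB is 16 blocks x ~2,000 operations, i.e. ~32,000
instructions; at two instructions per cycle that is ~16,000 cycles, padded
to 18,000 for cache misses, and 18,000 cycles / 667 MHz is roughly 27 us.)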
-
-extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks);
-
-static void spe_begin(void)
-{
- /* We just start SPE operations and will save SPE registers later. */
- preempt_disable();
- enable_kernel_spe();
-}
-
-static void spe_end(void)
-{
- disable_kernel_spe();
- /* reenable preemption */
- preempt_enable();
-}
-
-static inline void ppc_sha256_clear_context(struct sha256_state *sctx)
-{
- int count = sizeof(struct sha256_state) >> 2;
- u32 *ptr = (u32 *)sctx;
-
- /* make sure we can clear the fast way */
- BUILD_BUG_ON(sizeof(struct sha256_state) % 4);
- do { *ptr++ = 0; } while (--count);
-}
-
-static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->count & 0x3f;
- const unsigned int avail = 64 - offset;
- unsigned int bytes;
- const u8 *src = data;
-
- if (avail > len) {
- sctx->count += len;
- memcpy((char *)sctx->buf + offset, src, len);
- return 0;
- }
-
- sctx->count += len;
-
- if (offset) {
- memcpy((char *)sctx->buf + offset, src, avail);
-
- spe_begin();
- ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1);
- spe_end();
-
- len -= avail;
- src += avail;
- }
-
- while (len > 63) {
- /* cut input data into smaller blocks */
- bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
- bytes = bytes & ~0x3f;
-
- spe_begin();
- ppc_spe_sha256_transform(sctx->state, src, bytes >> 6);
- spe_end();
-
- src += bytes;
- len -= bytes;
- }
-
- memcpy((char *)sctx->buf, src, len);
- return 0;
-}
-
-static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- const unsigned int offset = sctx->count & 0x3f;
- char *p = (char *)sctx->buf + offset;
- int padlen;
- __be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56);
- __be32 *dst = (__be32 *)out;
-
- padlen = 55 - offset;
- *p++ = 0x80;
-
- spe_begin();
-
- if (padlen < 0) {
- memset(p, 0x00, padlen + sizeof (u64));
- ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
- p = (char *)sctx->buf;
- padlen = 56;
- }
-
- memset(p, 0, padlen);
- *pbits = cpu_to_be64(sctx->count << 3);
- ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
-
- spe_end();
-
- dst[0] = cpu_to_be32(sctx->state[0]);
- dst[1] = cpu_to_be32(sctx->state[1]);
- dst[2] = cpu_to_be32(sctx->state[2]);
- dst[3] = cpu_to_be32(sctx->state[3]);
- dst[4] = cpu_to_be32(sctx->state[4]);
- dst[5] = cpu_to_be32(sctx->state[5]);
- dst[6] = cpu_to_be32(sctx->state[6]);
- dst[7] = cpu_to_be32(sctx->state[7]);
-
- ppc_sha256_clear_context(sctx);
- return 0;
-}
-
-static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
-{
- __be32 D[SHA256_DIGEST_SIZE >> 2];
- __be32 *dst = (__be32 *)out;
-
- ppc_spe_sha256_final(desc, (u8 *)D);
-
- /* avoid bytewise memcpy */
- dst[0] = D[0];
- dst[1] = D[1];
- dst[2] = D[2];
- dst[3] = D[3];
- dst[4] = D[4];
- dst[5] = D[5];
- dst[6] = D[6];
-
- /* clear sensitive data */
- memzero_explicit(D, SHA256_DIGEST_SIZE);
- return 0;
-}
-
-static int ppc_spe_sha256_export(struct shash_desc *desc, void *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int ppc_spe_sha256_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-
-static struct shash_alg algs[2] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = ppc_spe_sha256_update,
- .final = ppc_spe_sha256_final,
- .export = ppc_spe_sha256_export,
- .import = ppc_spe_sha256_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-ppc-spe",
- .cra_priority = 300,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = ppc_spe_sha256_update,
- .final = ppc_spe_sha224_final,
- .export = ppc_spe_sha256_export,
- .import = ppc_spe_sha256_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-ppc-spe",
- .cra_priority = 300,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init ppc_spe_sha256_mod_init(void)
-{
- return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit ppc_spe_sha256_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_init(ppc_spe_sha256_mod_init);
-module_exit(ppc_spe_sha256_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, SPE optimized");
-
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha224-ppc-spe");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha256-ppc-spe");