path: root/crypto/tcrypt.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2025-03-29 10:01:55 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-03-29 10:01:55 -0700
commit     e5e0e6bebef3a21081fd1057c40468d4cff1a60d (patch)
tree       d8b77300838e2d31a89f267ccfb706722981c4a1 /crypto/tcrypt.c
parent     7d06015d936c861160803e020f68f413b5c3cd9d (diff)
parent     99585c2192cb1ce212876e82ef01d1c98c7f4699 (diff)
Merge tag 'v6.15-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Remove legacy compression interface
   - Improve scatterwalk API
   - Add request chaining to ahash and acomp
   - Add virtual address support to ahash and acomp
   - Add folio support to acomp
   - Remove NULL dst support from acomp

  Algorithms:
   - Library options are fully hidden (selected by kernel users only)
   - Add Kerberos5 algorithms
   - Add VAES-based ctr(aes) on x86
   - Ensure LZO respects output buffer length on compression
   - Remove obsolete SIMD fallback code path from arm/ghash-ce

  Drivers:
   - Add support for PCI device 0x1134 in ccp
   - Add support for rk3588's standalone TRNG in rockchip
   - Add Inside Secure SafeXcel EIP-93 crypto engine support in eip93
   - Fix bugs in tegra uncovered by multi-threaded self-test
   - Fix corner cases in hisilicon/sec2

  Others:
   - Add SG_MITER_LOCAL to sg miter
   - Convert ubifs, hibernate and xfrm_ipcomp from legacy API to acomp"

* tag 'v6.15-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (187 commits)
  crypto: testmgr - Add multibuffer acomp testing
  crypto: acomp - Fix synchronous acomp chaining fallback
  crypto: testmgr - Add multibuffer hash testing
  crypto: hash - Fix synchronous ahash chaining fallback
  crypto: arm/ghash-ce - Remove SIMD fallback code path
  crypto: essiv - Replace memcpy() + NUL-termination with strscpy()
  crypto: api - Call crypto_alg_put in crypto_unregister_alg
  crypto: scompress - Fix incorrect stream freeing
  crypto: lib/chacha - remove unused arch-specific init support
  crypto: remove obsolete 'comp' compression API
  crypto: compress_null - drop obsolete 'comp' implementation
  crypto: cavium/zip - drop obsolete 'comp' implementation
  crypto: zstd - drop obsolete 'comp' implementation
  crypto: lzo - drop obsolete 'comp' implementation
  crypto: lzo-rle - drop obsolete 'comp' implementation
  crypto: lz4hc - drop obsolete 'comp' implementation
  crypto: lz4 - drop obsolete 'comp' implementation
  crypto: deflate - drop obsolete 'comp' implementation
  crypto: 842 - drop obsolete 'comp' implementation
  crypto: nx - Migrate to scomp API
  ...
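The tcrypt change below exercises the ahash request-chaining API mentioned above. As a rough, illustrative sketch of that pattern (not part of this patch; the helper name chain_digest_example, its parameters, and the one-scatterlist-per-buffer layout are made up for illustration), one lead request carries the crypto_wait completion callback, the remaining requests are chained onto it with ahash_request_chain(), a single crypto_ahash_digest() on the lead submits the whole batch, and per-request status is read back with ahash_request_err():

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Illustrative sketch only (not part of this patch): digest several
 * buffers with one submission by chaining ahash requests onto a lead
 * request, the same pattern the new test_mb_ahash_speed() below uses.
 * sgs points to one single-entry scatterlist per buffer; result buffers
 * are 64 bytes, matching the test code.
 */
static int chain_digest_example(const char *algo, struct scatterlist *sgs,
				unsigned int nbytes, u8 (*results)[64],
				unsigned int n)
{
	struct ahash_request **reqs;
	struct crypto_ahash *tfm;
	struct crypto_wait wait;
	unsigned int i;
	int err;

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	reqs = kcalloc(n, sizeof(*reqs), GFP_KERNEL);
	if (!reqs) {
		err = -ENOMEM;
		goto out_tfm;
	}

	crypto_init_wait(&wait);

	for (i = 0; i < n; i++) {
		reqs[i] = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!reqs[i]) {
			err = -ENOMEM;
			goto out_reqs;
		}

		ahash_request_set_crypt(reqs[i], &sgs[i], results[i], nbytes);

		if (i == 0) {
			/* Only the lead request carries the completion. */
			ahash_request_set_callback(reqs[0], 0,
						   crypto_req_done, &wait);
		} else {
			/* Followers are chained onto the lead request. */
			ahash_request_set_callback(reqs[i], 0, NULL, NULL);
			ahash_request_chain(reqs[i], reqs[0]);
		}
	}

	/* One digest call submits the whole chain; wait on the lead. */
	err = crypto_wait_req(crypto_ahash_digest(reqs[0]), &wait);

	/* Collect per-request status once the chain has completed. */
	for (i = 0; !err && i < n; i++)
		err = ahash_request_err(reqs[i]);

out_reqs:
	/*
	 * Free only the lead request, as the patch's own cleanup path does;
	 * the chained followers are assumed to be released along with it.
	 */
	if (reqs[0])
		ahash_request_free(reqs[0]);
	kfree(reqs);
out_tfm:
	crypto_free_ahash(tfm);
	return err;
}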
Diffstat (limited to 'crypto/tcrypt.c')
-rw-r--r--  crypto/tcrypt.c  231
1 file changed, 231 insertions, 0 deletions
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 879fc21dcc16..96f4a66be14c 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -716,6 +716,207 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
return crypto_wait_req(ret, wait);
}
+struct test_mb_ahash_data {
+ struct scatterlist sg[XBUFSIZE];
+ char result[64];
+ struct ahash_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+};
+
+static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb,
+ int *rc)
+{
+ int i, err;
+
+ /* Fire up a bunch of concurrent requests */
+ err = crypto_ahash_digest(data[0].req);
+
+ /* Wait for all requests to finish */
+ err = crypto_wait_req(err, &data[0].wait);
+ if (num_mb < 2)
+ return err;
+
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = ahash_request_err(data[i].req);
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
+ int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret = 0;
+ int *rc;
+
+ rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_ahash_op(data, num_mb, rc);
+ if (ret)
+ goto out;
+ }
+
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount * num_mb, secs, (u64)bcount * blen * num_mb);
+
+out:
+ kfree(rc);
+ return ret;
+}
+
+static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
+ u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+ int *rc;
+
+ rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_ahash_op(data, num_mb, rc);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_ahash_op(data, num_mb, rc);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+out:
+ kfree(rc);
+ return ret;
+}
+
+static void test_mb_ahash_speed(const char *algo, unsigned int secs,
+ struct hash_speed *speed, u32 num_mb)
+{
+ struct test_mb_ahash_data *data;
+ struct crypto_ahash *tfm;
+ unsigned int i, j, k;
+ int ret;
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ tfm = crypto_alloc_ahash(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto free_data;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ if (testmgr_alloc_buf(data[i].xbuf))
+ goto out;
+
+ crypto_init_wait(&data[i].wait);
+
+ data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: hash: Failed to allocate request for %s\n",
+ algo);
+ goto out;
+ }
+
+
+ if (i) {
+ ahash_request_set_callback(data[i].req, 0, NULL, NULL);
+ ahash_request_chain(data[i].req, data[0].req);
+ } else
+ ahash_request_set_callback(data[0].req, 0,
+ crypto_req_done,
+ &data[0].wait);
+
+ sg_init_table(data[i].sg, XBUFSIZE);
+ for (j = 0; j < XBUFSIZE; j++) {
+ sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
+ memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
+ }
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
+ get_driver_name(crypto_ahash, tfm));
+
+ for (i = 0; speed[i].blen != 0; i++) {
+ /* For some reason this only tests digests. */
+ if (speed[i].blen != speed[i].plen)
+ continue;
+
+ if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for tvmem (%lu)\n",
+ speed[i].blen, XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ if (klen)
+ crypto_ahash_setkey(tfm, tvmem[0], klen);
+
+ for (k = 0; k < num_mb; k++)
+ ahash_request_set_crypt(data[k].req, data[k].sg,
+ data[k].result, speed[i].blen);
+
+ pr_info("test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen,
+ speed[i].blen / speed[i].plen);
+
+ if (secs) {
+ ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
+ num_mb);
+ cond_resched();
+ } else {
+ ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+ }
+
+
+ if (ret) {
+ pr_err("At least one hashing failed ret=%d\n", ret);
+ break;
+ }
+ }
+
+out:
+ ahash_request_free(data[0].req);
+
+ for (k = 0; k < num_mb; ++k)
+ testmgr_free_buf(data[k].xbuf);
+
+ crypto_free_ahash(tfm);
+
+free_data:
+ kfree(data);
+}
+
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
char *out, int secs)
{
@@ -2383,6 +2584,36 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_ahash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
+ case 450:
+ test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
+ num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 451:
+ test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
+ num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 452:
+ test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
+ num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 453:
+ test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
+ num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 454:
+ test_mb_ahash_speed("streebog256", sec,
+ generic_hash_speed_template, num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 455:
+ test_mb_ahash_speed("streebog512", sec,
+ generic_hash_speed_template, num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
case 499:
break;
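For reference: the new modes are invoked like the other tcrypt hash speed tests, e.g. (assuming the existing tcrypt module parameters mode, sec and num_mb) modprobe tcrypt mode=450 sec=1 num_mb=8, where num_mb sets how many requests are chained per submission.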