author	Eric Biggers <ebiggers@google.com>	2025-01-05 11:34:15 -0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2025-01-14 11:38:33 +0800
commit	8b13c2239d8b65604b7a0ff18c2eb74b531c4c06 (patch)
tree	8f71f809699cac80175fddc5366dc98c8c5b93f6 /crypto
parent	f2489456fe54757098d967c1362c637da0be08a9 (diff)
crypto: skcipher - optimize initializing skcipher_walk fields
The helper functions like crypto_skcipher_blocksize() take in a pointer to a tfm object, but they actually return properties of the algorithm. As the Linux kernel is compiled with -fno-strict-aliasing, the compiler has to assume that the writes to struct skcipher_walk could clobber the tfm's pointer to its algorithm. Thus it gets repeatedly reloaded in the generated code. Therefore, replace the use of these helper functions with straightforward accesses to the struct fields.

Note that while *users* of the skcipher and aead APIs are supposed to use the helper functions, this particular code is part of the API *implementation* in crypto/skcipher.c, which already accesses the algorithm struct directly in many cases. So there is no reason to prefer the helper functions here.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
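Illustrative aside, not part of the commit: a minimal, self-contained C sketch of the aliasing effect described above. The names used here (struct tfm, struct alg, struct walk, tfm_blocksize() and friends) are hypothetical stand-ins, not the kernel's types; they only mirror the shape of the problem under -fno-strict-aliasing.

/*
 * Hypothetical sketch (not kernel code) of the reload problem described
 * above.  With -fno-strict-aliasing, the compiler must assume that the
 * stores into *walk may overwrite t->alg, so each helper call reloads
 * the algorithm pointer from memory before dereferencing it.
 */
struct alg  { unsigned int blocksize, ivsize, alignmask; };
struct tfm  { const struct alg *alg; };
struct walk { unsigned int blocksize, ivsize, alignmask; };

static unsigned int tfm_blocksize(const struct tfm *t) { return t->alg->blocksize; }
static unsigned int tfm_ivsize(const struct tfm *t)    { return t->alg->ivsize; }
static unsigned int tfm_alignmask(const struct tfm *t) { return t->alg->alignmask; }

/* Helper-based version: t->alg is (re)loaded once per helper call. */
void init_walk_helpers(struct walk *walk, const struct tfm *t)
{
	walk->blocksize = tfm_blocksize(t);
	walk->ivsize    = tfm_ivsize(t);
	walk->alignmask = tfm_alignmask(t);
}

/*
 * Direct version: the pointer is loaded once into a local that the
 * stores to *walk cannot clobber, so it need not be reloaded.
 */
void init_walk_direct(struct walk *walk, const struct tfm *t)
{
	const struct alg *alg = t->alg;

	walk->blocksize = alg->blocksize;
	walk->ivsize    = alg->ivsize;
	walk->alignmask = alg->alignmask;
}

At typical optimization levels the helper-based version tends to show one load of t->alg per field, while the direct version shows a single load; this mirrors the codegen difference the comments added by the patch describe.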
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/skcipher.c  30
1 file changed, 20 insertions, 10 deletions
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index e54d1ad46566..6b62d816f08d 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -308,8 +308,8 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req, bool atomic)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const struct skcipher_alg *alg =
+ crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
@@ -328,9 +328,14 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->blocksize = crypto_skcipher_blocksize(tfm);
- walk->ivsize = crypto_skcipher_ivsize(tfm);
- walk->alignmask = crypto_skcipher_alignmask(tfm);
+ /*
+ * Accessing 'alg' directly generates better code than using the
+ * crypto_skcipher_blocksize() and similar helper functions here, as it
+ * prevents the algorithm pointer from being repeatedly reloaded.
+ */
+ walk->blocksize = alg->base.cra_blocksize;
+ walk->ivsize = alg->co.ivsize;
+ walk->alignmask = alg->base.cra_alignmask;
if (alg->co.base.cra_type != &crypto_skcipher_type)
walk->stride = alg->co.chunksize;
@@ -344,7 +349,7 @@ EXPORT_SYMBOL_GPL(skcipher_walk_virt);
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct aead_request *req, bool atomic)
{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
walk->nbytes = 0;
walk->iv = req->iv;
@@ -366,10 +371,15 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
scatterwalk_done(&walk->in, 0, walk->total);
scatterwalk_done(&walk->out, 0, walk->total);
- walk->blocksize = crypto_aead_blocksize(tfm);
- walk->stride = crypto_aead_chunksize(tfm);
- walk->ivsize = crypto_aead_ivsize(tfm);
- walk->alignmask = crypto_aead_alignmask(tfm);
+ /*
+ * Accessing 'alg' directly generates better code than using the
+ * crypto_aead_blocksize() and similar helper functions here, as it
+ * prevents the algorithm pointer from being repeatedly reloaded.
+ */
+ walk->blocksize = alg->base.cra_blocksize;
+ walk->stride = alg->chunksize;
+ walk->ivsize = alg->ivsize;
+ walk->alignmask = alg->base.cra_alignmask;
return skcipher_walk_first(walk);
}