author     Herbert Xu <herbert@gondor.apana.org.au>	2025-03-15 18:30:22 +0800
committer  Herbert Xu <herbert@gondor.apana.org.au>	2025-03-21 17:33:39 +0800
commit     2d3553ecb4e316a74571da253191c37fb90cb815 (patch)
tree       78056d3a2f2f1dc641e9cef299c5ea885dfc179b /crypto
parent     39a3f23407d3b6942727ae2367382b5575d995c9 (diff)
crypto: scomp - Remove support for some non-trivial SG lists
As the only user of acomp/scomp uses a trivial single-page SG
list, remove support for everything else in preparation for the
addition of virtual address support.
However, keep support for non-trivial source SG lists as that
user is currently jumping through hoops in order to linearise
the source data.
Limit the source SG linearisation buffer to a single page as
that user never goes over that. The only other potential user
(IPComp) is also unlikely to exceed that, and it can easily do
its own linearisation if necessary.
Also keep the destination SG linearisation for IPComp.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
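
For context, the trivial single-page SG usage described above looks roughly like the sketch below. It is illustrative only: the helper name, the buffers and the "lzo" algorithm choice are assumptions rather than anything taken from this patch; only the acomp API calls themselves are real.

/*
 * Hypothetical caller: compress one linear, non-highmem buffer that
 * fits in a single page, using one-entry SG lists for source and
 * destination.  This is the case the rewritten scomp path can serve
 * with kmap_local_page() instead of the per-CPU scratch buffers.
 */
#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_compress_page(const void *src_buf, void *dst_buf,
				 unsigned int slen, unsigned int *dlen)
{
	struct scatterlist src_sg, dst_sg;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("lzo", 0, 0);	/* algorithm chosen arbitrarily */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&src_sg, src_buf, slen);	/* trivial single-entry SG lists */
	sg_init_one(&dst_sg, dst_buf, *dlen);
	acomp_request_set_params(req, &src_sg, &dst_sg, slen, *dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!err)
		*dlen = req->dlen;	/* bytes actually produced */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}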
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/acompress.c	1
-rw-r--r--	crypto/scompress.c	127
2 files changed, 73 insertions, 55 deletions
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 45444e99a9db..194a4b36f97f 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -73,7 +73,6 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
 
 	acomp->compress = alg->compress;
 	acomp->decompress = alg->decompress;
-	acomp->dst_free = alg->dst_free;
 	acomp->reqsize = alg->reqsize;
 
 	if (alg->exit)
diff --git a/crypto/scompress.c b/crypto/scompress.c
index a2ce481a10bb..4441c40f541f 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -12,8 +12,10 @@
 #include <crypto/scatterwalk.h>
 #include <linux/cryptouser.h>
 #include <linux/err.h>
+#include <linux/highmem.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/overflow.h>
 #include <linux/scatterlist.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -23,9 +25,14 @@
 
 #include "compress.h"
 
+#define SCOMP_SCRATCH_SIZE	65400
+
 struct scomp_scratch {
 	spinlock_t	lock;
-	void		*src;
+	union {
+		void *src;
+		unsigned long saddr;
+	};
 	void		*dst;
 };
 
@@ -66,7 +73,7 @@ static void crypto_scomp_free_scratches(void)
 	for_each_possible_cpu(i) {
 		scratch = per_cpu_ptr(&scomp_scratch, i);
 
-		vfree(scratch->src);
+		free_page(scratch->saddr);
 		vfree(scratch->dst);
 		scratch->src = NULL;
 		scratch->dst = NULL;
@@ -79,14 +86,15 @@ static int crypto_scomp_alloc_scratches(void)
 	int i;
 
 	for_each_possible_cpu(i) {
+		struct page *page;
 		void *mem;
 
 		scratch = per_cpu_ptr(&scomp_scratch, i);
 
-		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
-		if (!mem)
+		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
+		if (!page)
 			goto error;
-		scratch->src = mem;
+		scratch->src = page_address(page);
 		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
 		if (!mem)
 			goto error;
@@ -161,76 +169,88 @@ unlock:
 
 static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 {
+	struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-	void **tfm_ctx = acomp_tfm_ctx(tfm);
+	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
 	struct crypto_scomp *scomp = *tfm_ctx;
 	struct crypto_acomp_stream *stream;
-	struct scomp_scratch *scratch;
+	unsigned int slen = req->slen;
+	unsigned int dlen = req->dlen;
+	struct page *spage, *dpage;
+	unsigned int soff, doff;
 	void *src, *dst;
-	unsigned int dlen;
+	unsigned int n;
 	int ret;
 
-	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+	if (!req->src || !slen)
 		return -EINVAL;
 
-	if (req->dst && !req->dlen)
+	if (!req->dst || !dlen)
 		return -EINVAL;
 
-	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
-		req->dlen = SCOMP_SCRATCH_SIZE;
-
-	dlen = req->dlen;
+	soff = req->src->offset;
+	spage = nth_page(sg_page(req->src), soff / PAGE_SIZE);
+	soff = offset_in_page(soff);
 
-	scratch = raw_cpu_ptr(&scomp_scratch);
-	spin_lock_bh(&scratch->lock);
-
-	if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
-		src = page_to_virt(sg_page(req->src)) + req->src->offset;
-	} else {
-		scatterwalk_map_and_copy(scratch->src, req->src, 0,
-					 req->slen, 0);
+	n = slen / PAGE_SIZE;
+	n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
+	if (slen <= req->src->length && (!PageHighMem(nth_page(spage, n)) ||
+					 size_add(soff, slen) <= PAGE_SIZE))
+		src = kmap_local_page(spage) + soff;
+	else
 		src = scratch->src;
-	}
 
-	if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
-		dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
-	else
+	doff = req->dst->offset;
+	dpage = nth_page(sg_page(req->dst), doff / PAGE_SIZE);
+	doff = offset_in_page(doff);
+
+	n = dlen / PAGE_SIZE;
+	n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
+	if (dlen <= req->dst->length && (!PageHighMem(nth_page(dpage, n)) ||
+					 size_add(doff, dlen) <= PAGE_SIZE))
+		dst = kmap_local_page(dpage) + doff;
+	else {
+		if (dlen > SCOMP_SCRATCH_SIZE)
+			dlen = SCOMP_SCRATCH_SIZE;
 		dst = scratch->dst;
+	}
+
+	spin_lock_bh(&scratch->lock);
+
+	if (src == scratch->src)
+		memcpy_from_sglist(src, req->src, 0, slen);
 
 	stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
 	spin_lock(&stream->lock);
 	if (dir)
-		ret = crypto_scomp_compress(scomp, src, req->slen,
-					    dst, &req->dlen, stream->ctx);
+		ret = crypto_scomp_compress(scomp, src, slen,
+					    dst, &dlen, stream->ctx);
	else
-		ret = crypto_scomp_decompress(scomp, src, req->slen,
-					      dst, &req->dlen, stream->ctx);
+		ret = crypto_scomp_decompress(scomp, src, slen,
+					      dst, &dlen, stream->ctx);
+
+	if (dst == scratch->dst)
+		memcpy_to_sglist(req->dst, 0, dst, dlen);
+
 	spin_unlock(&stream->lock);
 
-	if (!ret) {
-		if (!req->dst) {
-			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
-			if (!req->dst) {
-				ret = -ENOMEM;
-				goto out;
-			}
-		} else if (req->dlen > dlen) {
-			ret = -ENOSPC;
-			goto out;
-		}
-		if (dst == scratch->dst) {
-			scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
-						 req->dlen, 1);
-		} else {
-			int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
-			int i;
-			struct page *dst_page = sg_page(req->dst);
-
-			for (i = 0; i < nr_pages; i++)
-				flush_dcache_page(dst_page + i);
+	spin_unlock_bh(&scratch->lock);
+
+	req->dlen = dlen;
+
+	if (dst != scratch->dst) {
+		kunmap_local(dst);
+		dlen += doff;
+		for (;;) {
+			flush_dcache_page(dpage);
+			if (dlen <= PAGE_SIZE)
+				break;
+			dlen -= PAGE_SIZE;
+			dpage = nth_page(dpage, 1);
 		}
 	}
-out:
-	spin_unlock_bh(&scratch->lock);
+	if (src != scratch->src)
+		kunmap_local(src);
+
 	return ret;
 }
@@ -277,7 +297,6 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
 
 	crt->compress = scomp_acomp_compress;
 	crt->decompress = scomp_acomp_decompress;
-	crt->dst_free = sgl_free;
 
 	return 0;
 }
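
To make the page-index arithmetic in the new mapping checks concrete, here is a small userspace sketch (not kernel code; PAGE_SIZE fixed at 4 KiB and the helper name invented for illustration) that mirrors the two-step computation of n, the index of the page holding the last byte of an (offset, length) range. The sample inputs keep offset_in_page(length) + offset at one or more, where the unsigned subtraction is well defined.

#include <stdio.h>

#define PAGE_SIZE		4096u
#define offset_in_page(x)	((x) & (PAGE_SIZE - 1))

/* Mirrors the patch's arithmetic: quotient of the length plus a carry
 * from the partial page, i.e. (off + len - 1) / PAGE_SIZE overall. */
static unsigned int last_page_index(unsigned int off, unsigned int len)
{
	unsigned int n = len / PAGE_SIZE;

	n += (offset_in_page(len) + off - 1) / PAGE_SIZE;
	return n;
}

int main(void)
{
	printf("%u\n", last_page_index(0, 100));	/* 0: fits in the first page */
	printf("%u\n", last_page_index(100, 5000));	/* 1: last byte at offset 5099 */
	printf("%u\n", last_page_index(4000, 200));	/* 1: range crosses a page boundary */
	return 0;
}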