-rw-r--r--   crypto/ahash.c                   282
-rw-r--r--   include/crypto/hash.h             38
-rw-r--r--   include/crypto/internal/hash.h     5
-rw-r--r--   include/linux/crypto.h             2
4 files changed, 293 insertions, 34 deletions
diff --git a/crypto/ahash.c b/crypto/ahash.c
index e6bdb5ae2dac..208aa4c8d725 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -29,7 +29,7 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
struct crypto_hash_walk {
- char *data;
+ const char *data;
unsigned int offset;
unsigned int flags;
@@ -48,11 +48,17 @@ struct ahash_save_req_state {
int (*op)(struct ahash_request *req);
crypto_completion_t compl;
void *data;
+ struct scatterlist sg;
+ const u8 *src;
+ u8 *page;
+ unsigned int offset;
+ unsigned int nbytes;
};
static void ahash_reqchain_done(void *data, int err);
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
-static void ahash_restore_req(struct ahash_request *req);
+static void ahash_restore_req(struct ahash_save_req_state *state);
+static void ahash_def_finup_done1(void *data, int err);
static int ahash_def_finup(struct ahash_request *req);
static int hash_walk_next(struct crypto_hash_walk *walk)
@@ -88,20 +94,29 @@ static int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
+ walk->entrylen = 0;
- if (!walk->total) {
- walk->entrylen = 0;
+ if (!walk->total)
return 0;
+
+ walk->flags = req->base.flags;
+
+ if (ahash_request_isvirt(req)) {
+ walk->data = req->svirt;
+ walk->total = 0;
+ return req->nbytes;
}
walk->sg = req->src;
- walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
+ if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
+ return err;
+
walk->data -= walk->offset;
kunmap_local(walk->data);
@@ -188,6 +203,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
unsigned int offset;
int err;
+ if (ahash_request_isvirt(req))
+ return crypto_shash_digest(desc, req->svirt, nbytes,
+ req->result);
+
if (nbytes &&
(sg = req->src, offset = sg->offset,
nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
@@ -281,18 +300,82 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
+static bool ahash_request_hasvirt(struct ahash_request *req)
+{
+ struct ahash_request *r2;
+
+ if (ahash_request_isvirt(req))
+ return true;
+
+ list_for_each_entry(r2, &req->base.list, base.list)
+ if (ahash_request_isvirt(r2))
+ return true;
+
+ return false;
+}
+
+static int ahash_reqchain_virt(struct ahash_save_req_state *state,
+ int err, u32 mask)
+{
+ struct ahash_request *req = state->cur;
+
+ for (;;) {
+ unsigned len = state->nbytes;
+
+ req->base.err = err;
+
+ if (!state->offset)
+ break;
+
+ if (state->offset == len || err) {
+ u8 *result = req->result;
+
+ ahash_request_set_virt(req, state->src, result, len);
+ state->offset = 0;
+ break;
+ }
+
+ len -= state->offset;
+
+ len = min(PAGE_SIZE, len);
+ memcpy(state->page, state->src + state->offset, len);
+ state->offset += len;
+ req->nbytes = len;
+
+ err = state->op(req);
+ if (err == -EINPROGRESS) {
+ if (!list_empty(&state->head) ||
+ state->offset < state->nbytes)
+ err = -EBUSY;
+ break;
+ }
+
+ if (err == -EBUSY)
+ break;
+ }
+
+ return err;
+}
+
static int ahash_reqchain_finish(struct ahash_save_req_state *state,
int err, u32 mask)
{
struct ahash_request *req0 = state->req0;
struct ahash_request *req = state->cur;
+ struct crypto_ahash *tfm;
struct ahash_request *n;
+ bool update;
- req->base.err = err;
+ err = ahash_reqchain_virt(state, err, mask);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ goto out;
if (req != req0)
list_add_tail(&req->base.list, &req0->base.list);
+ tfm = crypto_ahash_reqtfm(req);
+ update = state->op == crypto_ahash_alg(tfm)->update;
+
list_for_each_entry_safe(req, n, &state->head, base.list) {
list_del_init(&req->base.list);
@@ -300,10 +383,27 @@ static int ahash_reqchain_finish(struct ahash_save_req_state *state,
req->base.complete = ahash_reqchain_done;
req->base.data = state;
state->cur = req;
+
+ if (update && ahash_request_isvirt(req) && req->nbytes) {
+ unsigned len = req->nbytes;
+ u8 *result = req->result;
+
+ state->src = req->svirt;
+ state->nbytes = len;
+
+ len = min(PAGE_SIZE, len);
+
+ memcpy(state->page, req->svirt, len);
+ state->offset = len;
+
+ ahash_request_set_crypt(req, &state->sg, result, len);
+ }
+
err = state->op(req);
if (err == -EINPROGRESS) {
- if (!list_empty(&state->head))
+ if (!list_empty(&state->head) ||
+ state->offset < state->nbytes)
err = -EBUSY;
goto out;
}
@@ -311,11 +411,14 @@ static int ahash_reqchain_finish(struct ahash_save_req_state *state,
if (err == -EBUSY)
goto out;
- req->base.err = err;
+ err = ahash_reqchain_virt(state, err, mask);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ goto out;
+
list_add_tail(&req->base.list, &req0->base.list);
}
- ahash_restore_req(req0);
+ ahash_restore_req(state);
out:
return err;
@@ -329,7 +432,7 @@ static void ahash_reqchain_done(void *data, int err)
data = state->data;
if (err == -EINPROGRESS) {
- if (!list_empty(&state->head))
+ if (!list_empty(&state->head) || state->offset < state->nbytes)
return;
goto notify;
}
@@ -346,40 +449,84 @@ static int ahash_do_req_chain(struct ahash_request *req,
int (*op)(struct ahash_request *req))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool update = op == crypto_ahash_alg(tfm)->update;
struct ahash_save_req_state *state;
struct ahash_save_req_state state0;
+ struct ahash_request *r2;
+ u8 *page = NULL;
int err;
- if (!ahash_request_chained(req) || crypto_ahash_req_chain(tfm))
+ if (crypto_ahash_req_chain(tfm) ||
+ (!ahash_request_chained(req) &&
+ (!update || !ahash_request_isvirt(req))))
return op(req);
- state = &state0;
+ if (update && ahash_request_hasvirt(req)) {
+ gfp_t gfp;
+ u32 flags;
+
+ flags = ahash_request_flags(req);
+ gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ page = (void *)__get_free_page(gfp);
+ err = -ENOMEM;
+ if (!page)
+ goto out_set_chain;
+ }
+ state = &state0;
if (ahash_is_async(tfm)) {
err = ahash_save_req(req, ahash_reqchain_done);
- if (err) {
- struct ahash_request *r2;
-
- req->base.err = err;
- list_for_each_entry(r2, &req->base.list, base.list)
- r2->base.err = err;
-
- return err;
- }
+ if (err)
+ goto out_free_page;
state = req->base.data;
}
state->op = op;
state->cur = req;
+ state->page = page;
+ state->offset = 0;
+ state->nbytes = 0;
INIT_LIST_HEAD(&state->head);
list_splice_init(&req->base.list, &state->head);
+ if (page)
+ sg_init_one(&state->sg, page, PAGE_SIZE);
+
+ if (update && ahash_request_isvirt(req) && req->nbytes) {
+ unsigned len = req->nbytes;
+ u8 *result = req->result;
+
+ state->src = req->svirt;
+ state->nbytes = len;
+
+ len = min(PAGE_SIZE, len);
+
+ memcpy(page, req->svirt, len);
+ state->offset = len;
+
+ ahash_request_set_crypt(req, &state->sg, result, len);
+ }
+
err = op(req);
if (err == -EBUSY || err == -EINPROGRESS)
return -EBUSY;
return ahash_reqchain_finish(state, err, ~0);
+
+out_free_page:
+ if (page) {
+ memset(page, 0, PAGE_SIZE);
+ free_page((unsigned long)page);
+ }
+
+out_set_chain:
+ req->base.err = err;
+ list_for_each_entry(r2, &req->base.list, base.list)
+ r2->base.err = err;
+
+ return err;
}
int crypto_ahash_init(struct ahash_request *req)
@@ -431,15 +578,19 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
req->base.complete = cplt;
req->base.data = state;
state->req0 = req;
+ state->page = NULL;
return 0;
}
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_save_req_state *state)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_save_req_state *state;
+ struct ahash_request *req = state->req0;
+ struct crypto_ahash *tfm;
+ free_page((unsigned long)state->page);
+
+ tfm = crypto_ahash_reqtfm(req);
if (!ahash_is_async(tfm))
return;
@@ -521,13 +672,74 @@ int crypto_ahash_finup(struct ahash_request *req)
return err;
}
- if (!crypto_ahash_alg(tfm)->finup)
+ if (!crypto_ahash_alg(tfm)->finup ||
+ (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
return ahash_def_finup(req);
return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
+static int ahash_def_digest_finish(struct ahash_save_req_state *state, int err)
+{
+ struct ahash_request *req = state->req0;
+ struct crypto_ahash *tfm;
+
+ if (err)
+ goto out;
+
+ tfm = crypto_ahash_reqtfm(req);
+ if (ahash_is_async(tfm))
+ req->base.complete = ahash_def_finup_done1;
+
+ err = crypto_ahash_update(req);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return err;
+
+out:
+ ahash_restore_req(state);
+ return err;
+}
+
+static void ahash_def_digest_done(void *data, int err)
+{
+ struct ahash_save_req_state *state0 = data;
+ struct ahash_save_req_state state;
+ struct ahash_request *areq;
+
+ state = *state0;
+ areq = state.req0;
+ if (err == -EINPROGRESS)
+ goto out;
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = ahash_def_digest_finish(state0, err);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ state.compl(state.data, err);
+}
+
+static int ahash_def_digest(struct ahash_request *req)
+{
+ struct ahash_save_req_state *state;
+ int err;
+
+ err = ahash_save_req(req, ahash_def_digest_done);
+ if (err)
+ return err;
+
+ state = req->base.data;
+
+ err = crypto_ahash_init(req);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return err;
+
+ return ahash_def_digest_finish(state, err);
+}
+
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -549,6 +761,9 @@ int crypto_ahash_digest(struct ahash_request *req)
return err;
}
+ if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
+ return ahash_def_digest(req);
+
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
@@ -564,17 +779,19 @@ static void ahash_def_finup_done2(void *data, int err)
if (err == -EINPROGRESS)
return;
- ahash_restore_req(areq);
+ ahash_restore_req(state);
ahash_request_complete(areq, err);
}
-static int ahash_def_finup_finish1(struct ahash_request *req, int err)
+static int ahash_def_finup_finish1(struct ahash_save_req_state *state, int err)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ahash_request *req = state->req0;
+ struct crypto_ahash *tfm;
if (err)
goto out;
+ tfm = crypto_ahash_reqtfm(req);
if (ahash_is_async(tfm))
req->base.complete = ahash_def_finup_done2;
@@ -583,7 +800,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
return err;
out:
- ahash_restore_req(req);
+ ahash_restore_req(state);
return err;
}
@@ -600,7 +817,7 @@ static void ahash_def_finup_done1(void *data, int err)
areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- err = ahash_def_finup_finish1(areq, err);
+ err = ahash_def_finup_finish1(state0, err);
if (err == -EINPROGRESS || err == -EBUSY)
return;
@@ -610,17 +827,20 @@ out:
static int ahash_def_finup(struct ahash_request *req)
{
+ struct ahash_save_req_state *state;
int err;
err = ahash_save_req(req, ahash_def_finup_done1);
if (err)
return err;
+ state = req->base.data;
+
err = crypto_ahash_update(req);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
- return ahash_def_finup_finish1(req, err);
+ return ahash_def_finup_finish1(state, err);
}
int crypto_ahash_export(struct ahash_request *req, void *out)
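
For illustration only, not part of the patch: the ahash_do_req_chain() changes above fall back to a bounce page when a virtual-address update hits a driver that only accepts scatterlists. A minimal standalone sketch of that idea, with a hypothetical function name, assuming a synchronous transform and ignoring request chaining:

/* Illustrative sketch, not from this patch: feed a virtual buffer to a
 * scatterlist-only update path by copying it through one bounce page at
 * a time.  Asynchronous completion (-EINPROGRESS/-EBUSY) is not handled.
 */
static int example_update_virt_bounced(struct ahash_request *req,
					const u8 *src, unsigned int total,
					u8 *bounce_page)
{
	struct scatterlist sg;
	unsigned int done = 0;
	int err = 0;

	sg_init_one(&sg, bounce_page, PAGE_SIZE);

	while (done < total) {
		unsigned int len = min_t(unsigned int, PAGE_SIZE,
					 total - done);

		/* Stage the next chunk in the bounce page and hash it. */
		memcpy(bounce_page, src + done, len);
		ahash_request_set_crypt(req, &sg, req->result, len);

		err = crypto_ahash_update(req);
		if (err)
			break;

		done += len;
	}

	return err;
}
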
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0a6f744ce4a1..4e87e39679cb 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -12,6 +12,9 @@
#include <linux/crypto.h>
#include <linux/string.h>
+/* Set this bit for virtual address instead of SG list. */
+#define CRYPTO_AHASH_REQ_VIRT 0x00000001
+
struct crypto_ahash;
/**
@@ -52,7 +55,10 @@ struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
- struct scatterlist *src;
+ union {
+ struct scatterlist *src;
+ const u8 *svirt;
+ };
u8 *result;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
@@ -610,9 +616,13 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
crypto_completion_t compl,
void *data)
{
+ u32 keep = CRYPTO_AHASH_REQ_VIRT;
+
req->base.complete = compl;
req->base.data = data;
- req->base.flags = flags;
+ flags &= ~keep;
+ req->base.flags &= keep;
+ req->base.flags |= flags;
crypto_reqchain_init(&req->base);
}
@@ -636,6 +646,30 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
req->src = src;
req->nbytes = nbytes;
req->result = result;
+ req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT;
+}
+
+/**
+ * ahash_request_set_virt() - set virtual address data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source virtual address
+ * @result: buffer that is filled with the message digest -- the caller must
+ * ensure that the buffer has sufficient space by, for example, calling
+ * crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source virtual address
+ *
+ * By using this call, the caller references the source virtual address.
+ * The source virtual address points to the data the message digest is to
+ * be calculated for.
+ */
+static inline void ahash_request_set_virt(struct ahash_request *req,
+ const u8 *src, u8 *result,
+ unsigned int nbytes)
+{
+ req->svirt = src;
+ req->nbytes = nbytes;
+ req->result = result;
+ req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}
static inline void ahash_request_chain(struct ahash_request *req,
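
For illustration only, not part of the patch: a possible caller of the new interface. The algorithm name ("sha256") and the wrapper function are assumptions; the point is that the caller passes a virtually contiguous buffer directly instead of building a scatterlist:

/* Illustrative sketch, not from this patch: one-shot digest over a
 * virtually contiguous buffer using the new virtual-address interface.
 */
static int example_digest_virt(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	/* Point the request at the buffer directly; no SG list needed. */
	ahash_request_set_virt(req, buf, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}
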
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 36425ecd2c37..485e22cf517e 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -252,6 +252,11 @@ static inline bool ahash_request_chained(struct ahash_request *req)
return crypto_request_chained(&req->base);
}
+static inline bool ahash_request_isvirt(struct ahash_request *req)
+{
+ return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
+}
+
static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm)
{
return crypto_tfm_req_chain(&tfm->base);
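
For illustration only, not part of the patch: per the updated CRYPTO_ALG_REQ_CHAIN comment in include/linux/crypto.h below, a driver that sets this flag is expected to handle virtual-address requests itself. The two example_drv_*() helpers named here are hypothetical:

/* Illustrative sketch, not from this patch: a driver advertising
 * CRYPTO_ALG_REQ_CHAIN dispatches on the request form itself.
 */
static int example_drv_digest(struct ahash_request *req)
{
	if (ahash_request_isvirt(req))
		return example_drv_digest_virt(req->svirt, req->nbytes,
					       req->result);

	return example_drv_digest_sg(req->src, req->nbytes, req->result);
}
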
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 1d2a6c515d58..61ac11226638 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -126,7 +126,7 @@
*/
#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
-/* Set if the algorithm supports request chains. */
+/* Set if the algorithm supports request chains and virtual addresses. */
#define CRYPTO_ALG_REQ_CHAIN 0x00040000
/*