Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c | 117
 1 file changed, 46 insertions(+), 71 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 5ef43231fe77..79704fbc523b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -21,6 +21,7 @@
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
+#include <linux/fips.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
@@ -120,7 +121,6 @@ struct iv_benbi_private {
#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
- struct crypto_shash *hash_tfm;
u8 *seed;
};
@@ -254,22 +254,15 @@ static unsigned int max_write_size = 0;
module_param(max_write_size, uint, 0644);
MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
-static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio)
+static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio, bool no_split)
{
struct crypt_config *cc = ti->private;
unsigned val, sector_align;
bool wrt = op_is_write(bio_op(bio));
- if (wrt) {
- /*
- * For zoned devices, splitting write operations creates the
- * risk of deadlocking queue freeze operations with zone write
- * plugging BIO work when the reminder of a split BIO is
- * issued. So always allow the entire BIO to proceed.
- */
- if (ti->emulate_zone_append)
- return bio_sectors(bio);
-
+ if (no_split) {
+ val = -1;
+ } else if (wrt) {
val = min_not_zero(READ_ONCE(max_write_size),
DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
} else {
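
An aside on the new no_split sentinel: assigning -1 to the unsigned "val" converts to UINT_MAX under C's integer conversion rules, so the caller's later bio_sectors(bio) > max_sectors comparison can never request a split. A minimal, self-contained illustration (not part of the patch):

        #include <assert.h>
        #include <limits.h>

        int main(void)
        {
                unsigned int val = -1;  /* converts to UINT_MAX */
                assert(val == UINT_MAX);
                return 0;
        }
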
@@ -465,10 +458,6 @@ static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
- if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
- crypto_free_shash(lmk->hash_tfm);
- lmk->hash_tfm = NULL;
-
kfree_sensitive(lmk->seed);
lmk->seed = NULL;
}
@@ -483,11 +472,10 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL;
}
- lmk->hash_tfm = crypto_alloc_shash("md5", 0,
- CRYPTO_ALG_ALLOCATES_MEMORY);
- if (IS_ERR(lmk->hash_tfm)) {
- ti->error = "Error initializing LMK hash";
- return PTR_ERR(lmk->hash_tfm);
+ if (fips_enabled) {
+ ti->error = "LMK support is disabled due to FIPS";
+ /* ... because it uses MD5. */
+ return -EINVAL;
}
/* No seed in LMK version 2 */
@@ -498,7 +486,6 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
if (!lmk->seed) {
- crypt_iv_lmk_dtr(cc);
ti->error = "Error kmallocing seed storage in LMK";
return -ENOMEM;
}
@@ -514,7 +501,7 @@ static int crypt_iv_lmk_init(struct crypt_config *cc)
/* LMK seed is on the position of LMK_KEYS + 1 key */
if (lmk->seed)
memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
- crypto_shash_digestsize(lmk->hash_tfm));
+ MD5_DIGEST_SIZE);
return 0;
}
@@ -529,55 +516,31 @@ static int crypt_iv_lmk_wipe(struct crypt_config *cc)
return 0;
}
-static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
- struct dm_crypt_request *dmreq,
- u8 *data)
+static void crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq, u8 *data)
{
struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
- SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
- union {
- struct md5_state md5state;
- u8 state[CRYPTO_MD5_STATESIZE];
- } u;
+ struct md5_ctx ctx;
__le32 buf[4];
- int i, r;
- desc->tfm = lmk->hash_tfm;
+ md5_init(&ctx);
- r = crypto_shash_init(desc);
- if (r)
- return r;
-
- if (lmk->seed) {
- r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
- if (r)
- return r;
- }
+ if (lmk->seed)
+ md5_update(&ctx, lmk->seed, LMK_SEED_SIZE);
/* Sector is always 512B, block size 16, add data of blocks 1-31 */
- r = crypto_shash_update(desc, data + 16, 16 * 31);
- if (r)
- return r;
+ md5_update(&ctx, data + 16, 16 * 31);
/* Sector is cropped to 56 bits here */
buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
buf[2] = cpu_to_le32(4024);
buf[3] = 0;
- r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
- if (r)
- return r;
+ md5_update(&ctx, (u8 *)buf, sizeof(buf));
/* No MD5 padding here */
- r = crypto_shash_export(desc, &u.md5state);
- if (r)
- return r;
-
- for (i = 0; i < MD5_HASH_WORDS; i++)
- __cpu_to_le32s(&u.md5state.hash[i]);
- memcpy(iv, &u.md5state.hash, cc->iv_size);
-
- return 0;
+ cpu_to_le32_array(ctx.state.h, ARRAY_SIZE(ctx.state.h));
+ memcpy(iv, ctx.state.h, cc->iv_size);
}
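
For context on the library interface this hunk converts to: the conventional lib/crypto pattern finalizes the digest, whereas crypt_iv_lmk_one() above deliberately stops early and copies the raw compression state, since LMK omits MD5's length padding ("No MD5 padding here"). A sketch of the conventional pattern, using the <crypto/md5.h> helpers the patch itself relies on (md5_init/md5_update, struct md5_ctx) and assuming the library's usual md5_final() finalization step:

        #include <crypto/md5.h>

        /* Hypothetical helper: one-shot digest via the incremental API.
         * Unlike crypt_iv_lmk_one(), md5_final() applies the standard
         * MD5 length padding before producing the digest. */
        static void example_md5(const u8 *data, size_t len,
                                u8 out[MD5_DIGEST_SIZE])
        {
                struct md5_ctx ctx;

                md5_init(&ctx);
                md5_update(&ctx, data, len);
                md5_final(&ctx, out);
        }
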
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
@@ -585,17 +548,15 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
{
struct scatterlist *sg;
u8 *src;
- int r = 0;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
src = kmap_local_page(sg_page(sg));
- r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
+ crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
kunmap_local(src);
} else
memset(iv, 0, cc->iv_size);
-
- return r;
+ return 0;
}
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
@@ -603,21 +564,19 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
{
struct scatterlist *sg;
u8 *dst;
- int r;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
return 0;
sg = crypt_get_sg_data(cc, dmreq->sg_out);
dst = kmap_local_page(sg_page(sg));
- r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
+ crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
/* Tweak the first block of plaintext sector */
- if (!r)
- crypto_xor(dst + sg->offset, iv, cc->iv_size);
+ crypto_xor(dst + sg->offset, iv, cc->iv_size);
kunmap_local(dst);
- return r;
+ return 0;
}
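
A note on the tweak above: crypto_xor() XORs the just-computed IV into the first block of the decrypted sector in place, undoing the tweak applied on the write path. Its byte-wise semantics are equivalent to this sketch (the kernel implementation works word-at-a-time where alignment allows):

        /* Semantically, crypto_xor(dst, src, n) does: */
        static void xor_bytes(u8 *dst, const u8 *src, unsigned int n)
        {
                while (n--)
                        *dst++ ^= *src++;
        }
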
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
@@ -1781,7 +1740,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_folio_all(fi, clone) {
if (folio_test_large(fi.folio)) {
percpu_counter_sub(&cc->n_allocated_pages,
- 1 << folio_order(fi.folio));
+ folio_nr_pages(fi.folio));
folio_put(fi.folio);
} else {
mempool_free(&fi.folio->page, &cc->page_pool);
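
(This accounting hunk is behavior-preserving: folio_nr_pages(folio) evaluates to the same count as 1 << folio_order(folio); the new form simply uses the idiomatic accessor instead of the open-coded shift.)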
@@ -3496,6 +3455,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
struct dm_crypt_io *io;
struct crypt_config *cc = ti->private;
unsigned max_sectors;
+ bool no_split;
/*
* If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
@@ -3513,10 +3473,20 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
/*
* Check if bio is too large, split as needed.
+ *
+ * For zoned devices, splitting write operations creates the
+ * risk of deadlocking queue freeze operations with zone write
+ * plugging BIO work when the remainder of a split BIO is
+ * issued. So always allow the entire BIO to proceed.
*/
- max_sectors = get_max_request_sectors(ti, bio);
- if (unlikely(bio_sectors(bio) > max_sectors))
+ no_split = (ti->emulate_zone_append && op_is_write(bio_op(bio))) ||
+ (bio->bi_opf & REQ_ATOMIC);
+ max_sectors = get_max_request_sectors(ti, bio, no_split);
+ if (unlikely(bio_sectors(bio) > max_sectors)) {
+ if (unlikely(no_split))
+ return DM_MAPIO_KILL;
dm_accept_partial_bio(bio, max_sectors);
+ }
/*
* Ensure that bio is a multiple of internal sector encryption size
@@ -3762,15 +3732,20 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (ti->emulate_zone_append)
limits->max_hw_sectors = min(limits->max_hw_sectors,
BIO_MAX_VECS << PAGE_SECTORS_SHIFT);
+
+ limits->atomic_write_hw_unit_max = min(limits->atomic_write_hw_unit_max,
+ BIO_MAX_VECS << PAGE_SHIFT);
+ limits->atomic_write_hw_max = min(limits->atomic_write_hw_max,
+ BIO_MAX_VECS << PAGE_SHIFT);
}
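
The clamp above keeps any advertised atomic write unit mappable onto a single clone bio built from single-page segments. Worked out for the common configuration (assuming 4 KiB pages; larger pages raise the bound):

        /* BIO_MAX_VECS << PAGE_SHIFT = 256 << 12
         *                            = 1048576 bytes (1 MiB) */
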
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 28, 0},
+ .version = {1, 29, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
- .features = DM_TARGET_ZONED_HM,
+ .features = DM_TARGET_ZONED_HM | DM_TARGET_ATOMIC_WRITES,
.report_zones = crypt_report_zones,
.map = crypt_map,
.status = crypt_status,