#include <keys/user-type.h>
#include <linux/device-mapper.h>
+#include <crypto/diskcipher.h>
#define DM_MSG_PREFIX "crypt"
enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
+ CRYPT_MODE_DISKCIPHER, /* Use inline H/W crypto (diskcipher) */
+ CRYPT_MODE_SKCIPHER, /* Use software crypto (skcipher) */
};
/*
union {
struct crypto_skcipher **tfms;
struct crypto_aead **tfms_aead;
+ struct crypto_diskcipher **tfms_diskc;
} cipher_tfm;
unsigned tfms_count;
unsigned long cipher_flags;
return crypt_integrity_aead(cc) && cc->key_mac_size;
}
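+/* helpers to query which crypto path was chosen at table construction */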
+static bool crypt_mode_diskcipher(struct crypt_config *cc)
+{
+ return test_bit(CRYPT_MODE_DISKCIPHER, &cc->cipher_flags);
+}
+
+static bool crypt_mode_skcipher(struct crypt_config *cc)
+{
+ return test_bit(CRYPT_MODE_SKCIPHER, &cc->cipher_flags);
+}
+
/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg)
/*
* free the processed pages
*/
- if (rw == WRITE)
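+ /* diskcipher clones reuse the base bio's pages; there is nothing to free */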
+ if ((rw == WRITE) && !crypt_mode_diskcipher(cc))
crypt_free_buffer_pages(cc, clone);
error = clone->bi_status;
bio_put(clone);
- if (rw == READ && !error) {
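+ /* diskcipher reads were decrypted in-flight by the H/W; skip kcryptd */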
+ if (rw == READ && !error && !crypt_mode_diskcipher(cc)) {
kcryptd_queue_crypt(io);
return;
}
crypt_inc_pending(io);
clone_init(io, clone);
+
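+ /* tag the clone so the inline-crypto H/W en/decrypts it in flight */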
+ if (crypt_mode_diskcipher(cc))
+ crypto_diskcipher_set(clone, cc->cipher_tfm.tfms_diskc[0]);
+
clone->bi_iter.bi_sector = cc->start + io->sector;
if (dm_crypt_integrity_io_alloc(io, clone)) {
cc->cipher_tfm.tfms = NULL;
}
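+/* clear the key from the H/W before releasing the single diskcipher tfm */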
+static void crypt_free_tfms_diskcipher(struct crypt_config *cc)
+{
+ if (!cc->cipher_tfm.tfms_diskc)
+ return;
+
+ if (cc->cipher_tfm.tfms_diskc[0] && !IS_ERR(cc->cipher_tfm.tfms_diskc[0])) {
+ crypto_diskcipher_clearkey(cc->cipher_tfm.tfms_diskc[0]);
+ crypto_free_diskcipher(cc->cipher_tfm.tfms_diskc[0]);
+ cc->cipher_tfm.tfms_diskc[0] = NULL;
+ }
+
+ kfree(cc->cipher_tfm.tfms_diskc);
+ cc->cipher_tfm.tfms_diskc = NULL;
+}
+
static void crypt_free_tfms(struct crypt_config *cc)
{
if (crypt_integrity_aead(cc))
crypt_free_tfms_aead(cc);
+ else if (crypt_mode_diskcipher(cc))
+ crypt_free_tfms_diskcipher(cc);
else
crypt_free_tfms_skcipher(cc);
}
return err;
}
}
+ set_bit(CRYPT_MODE_SKCIPHER, &cc->cipher_flags);
return 0;
}
return 0;
}
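+/*
+ * Only one diskcipher tfm is allocated: the H/W keeps the key and serves
+ * every bio tagged with this tfm.
+ */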
+static int crypt_alloc_tfms_diskcipher(struct crypt_config *cc, char *ciphermode)
+{
+ int err;
+
+ cc->cipher_tfm.tfms_diskc = kmalloc(sizeof(struct crypto_diskcipher *), GFP_KERNEL);
+ if (!cc->cipher_tfm.tfms_diskc)
+ return -ENOMEM;
+
+ cc->cipher_tfm.tfms_diskc[0] = crypto_alloc_diskcipher(ciphermode, 0, 0, 1);
+ if (IS_ERR(cc->cipher_tfm.tfms_diskc[0])) {
+ err = PTR_ERR(cc->cipher_tfm.tfms_diskc[0]);
+ crypt_free_tfms(cc);
+ pr_err("%s: failed to allocate diskcipher %s\n", __func__, ciphermode);
+ return err;
+ }
+ pr_info("%s: allocated diskcipher %s\n", __func__, ciphermode);
+
+ return 0;
+}
+
static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
if (crypt_integrity_aead(cc))
return crypt_alloc_tfms_aead(cc, ciphermode);
+ else if (crypt_mode_diskcipher(cc))
+ return crypt_alloc_tfms_diskcipher(cc, ciphermode);
else
return crypt_alloc_tfms_skcipher(cc, ciphermode);
}
r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
cc->key + (i * subkey_size),
subkey_size);
+ else if (crypt_mode_diskcipher(cc))
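+ /* only tfms_diskc[0] is allocated; this assumes tfms_count == 1 */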
+ r = crypto_diskcipher_setkey(cc->cipher_tfm.tfms_diskc[i],
+ cc->key + (i * subkey_size),
+ subkey_size, 1);
else
r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
cc->key + (i * subkey_size),
return -ENOMEM;
}
cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
- } else
+ } else if (crypt_mode_skcipher(cc))
cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
ret = crypt_ctr_blkdev_cipher(cc);
*ivopts = strsep(&tmp, "-");
*ivmode = strsep(&*ivopts, ":");
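+ /* an ivmode of "disk" or "fmp" requests the inline-crypto path */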
+ if (*ivmode && (!strcmp(*ivmode, "disk") || !strcmp(*ivmode, "fmp")))
+ set_bit(CRYPT_MODE_DISKCIPHER, &cc->cipher_flags);
+
if (tmp)
DMWARN("Ignoring unexpected additional cipher options");
return ret;
/* Initialize IV */
- ret = crypt_ctr_ivmode(ti, ivmode);
- if (ret < 0)
- return ret;
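+ /* the diskcipher H/W handles IVs itself; skip software IV setup */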
+ if (!crypt_mode_diskcipher(cc)) {
+ ret = crypt_ctr_ivmode(ti, ivmode);
+ if (ret < 0)
+ return ret;
+ }
/* Initialize and set key */
ret = crypt_set_key(cc, key);
if (cc->key_string)
memset(cc->key, 0, cc->key_size * sizeof(u8));
+ pr_info("%s with ivmode:%s, aead:%d, diskcipher:%d(%p), skcipher:%d\n",
+ __func__, ivmode, crypt_integrity_aead(cc),
+ crypt_mode_diskcipher(cc), cc->cipher_tfm.tfms_diskc[0],
+ crypt_mode_skcipher(cc));
+
return ret;
}
ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
if (ret < 0)
goto bad;
-
if (crypt_integrity_aead(cc)) {
cc->dmreq_start = sizeof(struct aead_request);
cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
+ } else if (crypt_mode_diskcipher(cc)) {
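+ /* no skcipher/aead request is embedded; the H/W does the transform */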
+ cc->per_bio_data_size = ti->per_io_data_size =
+ ALIGN(sizeof(struct dm_crypt_io), ARCH_KMALLOC_MINALIGN);
+ goto get_bio;
} else {
cc->dmreq_start = sizeof(struct skcipher_request);
cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
goto bad;
}
+get_bio:
cc->bs = bioset_create(MIN_IOS, 0, (BIOSET_NEED_BVECS |
BIOSET_NEED_RESCUER));
if (!cc->bs) {
goto bad;
}
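+ /* no kcryptd workqueue or writer thread: bios go straight to the device */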
+ if (crypt_mode_diskcipher(cc)) {
+ cc->crypt_queue = NULL;
+ cc->write_thread = NULL;
+ goto out;
+ }
+
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
else
}
wake_up_process(cc->write_thread);
+out:
ti->num_flush_bios = 1;
return 0;
if (crypt_integrity_aead(cc))
io->ctx.r.req_aead = (struct aead_request *)(io + 1);
- else
+ else if (crypt_mode_skcipher(cc))
io->ctx.r.req = (struct skcipher_request *)(io + 1);
- if (bio_data_dir(io->base_bio) == READ) {
+ if ((bio_data_dir(io->base_bio) == READ) || crypt_mode_diskcipher(cc)) {
if (kcryptd_io_read(io, GFP_NOWAIT))
kcryptd_queue_read(io);
} else
limits->physical_block_size = cc->sector_size;
blk_limits_io_min(limits, cc->sector_size);
}
+
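+ /* assumption behind this patch: the inline-crypto H/W works on page-sized units */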
+ if (crypt_mode_diskcipher(cc))
+ limits->logical_block_size = PAGE_SIZE;
}
static struct target_type crypt_target = {