/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally an extra IV seed.
 *       This means that after decryption the first block
 *       of the sector must be tweaked according to the decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

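/*
 * ESSIV generation: the IV is the 64-bit little-endian sector number,
 * encrypted with a cipher whose key is a hash of the volume key (the
 * salt computed in crypt_iv_essiv_init() above), i.e. IV = E(H(K), sector).
 */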
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

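/*
 * Worked example for the shift computed in crypt_iv_benbi_ctr(): with a
 * 16-byte block cipher, log = 4 and shift = 9 - 4 = 5, so sector N
 * yields the big-endian block count (N << 5) + 1 -- the first of the
 * 32 narrow blocks inside that 512-byte sector, counted from 1.
 */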
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

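/*
 * Encrypt or decrypt a single 512-byte sector: build one-entry
 * scatterlists over the input and output bvec pages, advance the
 * offsets/indexes to the next sector, generate the IV and issue one
 * ablkcipher request.
 */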
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

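/*
 * tfms_count is enforced to be a power of two, so the mask below cycles
 * through the keys sector by sector (Loop-AES style multikey): sector 0
 * uses key 0, sector 1 key 1, ..., wrapping every tfms_count sectors.
 */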
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

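/*
 * Note on the async handshake above: a request queued with
 * CRYPTO_TFM_REQ_MAY_BACKLOG returns -EBUSY once the driver's queue is
 * full; kcryptd_async_done() is then first invoked with -EINPROGRESS,
 * which completes ctx->restart so crypt_convert() may issue the next
 * block, and invoked again later with the final result.
 */
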
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio.  The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */
		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
					  struct bio *bio, sector_t sector)
{
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		mempool_free(io->ctx.req, cc->req_pool);
	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

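/*
 * Writes are encrypted into newly allocated pages. If the page pool
 * cannot back the whole bio at once, the bio is processed as a chain of
 * fragments; each extra fragment is a fresh dm_crypt_io whose base_io
 * points at the first one, which carries the pending count.
 */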
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->cc, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

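/*
 * Reads are decrypted in place: the bio that came back from the
 * underlying device serves as both source and destination of
 * crypt_convert().
 */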
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

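/*
 * The decoded volume key is split into tfms_count equal subkeys, one
 * per transform: e.g. a 64-byte key with keycount 2 programs each of
 * the two tfms with its own 32-byte key.
 */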
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int err = 0, i, r;

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey_allcpus(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

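/*
 * Illustrative cipher specifications accepted below:
 *   aes-cbc-essiv:sha256   AES-CBC with ESSIV derived via SHA-256
 *   aes:64-cbc-lmk         Loop-AES compatible 64-key AES-CBC
 *   twofish-ecb            ECB mode, no IV needed
 */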
static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/* Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as IV seed.
		 */
		if (cc->key_size % cc->key_parts)
			cc->key_parts++;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
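/*
 * Illustrative table line (device, length and key are examples only):
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digit key> 0 /dev/sdb 0
 */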
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_ablkcipher_alignmask(any_tfm(cc));
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
	}

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_bios)
			DMEMIT(" 1 allow_discards");

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 12, 1},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");