/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"

/*
 * per bio private data
 */
struct crypt_io {
	struct dm_target *target;
	struct bio *bio;
	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};
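
/*
 * crypt_convert() walks bio_in/bio_out one 512-byte sector at a time:
 * idx_in/idx_out index the current bio_vec, offset_in/offset_out are
 * the byte offsets within it, and sector is the number fed to the IV
 * generator (already shifted by iv_offset).
 */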
51 | ||
52 | struct crypt_config; | |
53 | ||
54 | struct crypt_iv_operations { | |
55 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, | |
56 | const char *opts); | |
57 | void (*dtr)(struct crypt_config *cc); | |
58 | const char *(*status)(struct crypt_config *cc); | |
59 | int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); | |
60 | }; | |
61 | ||
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	struct crypto_cipher *iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        256
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static kmem_cache_t *_crypt_io_pool;

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
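
/*
 * Illustrative example: with a 16-byte IV and sector 0x11223344,
 * crypt_iv_plain_gen() produces 44 33 22 11 00 00 ... 00.  Only the
 * low 32 bits of the sector number survive, so sectors past 2^32
 * repeat the IVs of earlier ones.
 */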
120 | ||
121 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | |
122 | const char *opts) | |
123 | { | |
d1806f6a | 124 | struct crypto_cipher *essiv_tfm; |
35058687 HX |
125 | struct crypto_hash *hash_tfm; |
126 | struct hash_desc desc; | |
1da177e4 LT |
127 | struct scatterlist sg; |
128 | unsigned int saltsize; | |
129 | u8 *salt; | |
d1806f6a | 130 | int err; |
1da177e4 LT |
131 | |
132 | if (opts == NULL) { | |
72d94861 | 133 | ti->error = "Digest algorithm missing for ESSIV mode"; |
1da177e4 LT |
134 | return -EINVAL; |
135 | } | |
136 | ||
137 | /* Hash the cipher key with the given hash algorithm */ | |
35058687 HX |
138 | hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); |
139 | if (IS_ERR(hash_tfm)) { | |
72d94861 | 140 | ti->error = "Error initializing ESSIV hash"; |
35058687 | 141 | return PTR_ERR(hash_tfm); |
1da177e4 LT |
142 | } |
143 | ||
35058687 | 144 | saltsize = crypto_hash_digestsize(hash_tfm); |
1da177e4 LT |
145 | salt = kmalloc(saltsize, GFP_KERNEL); |
146 | if (salt == NULL) { | |
72d94861 | 147 | ti->error = "Error kmallocing salt storage in ESSIV"; |
35058687 | 148 | crypto_free_hash(hash_tfm); |
1da177e4 LT |
149 | return -ENOMEM; |
150 | } | |
151 | ||
378f058c | 152 | sg_set_buf(&sg, cc->key, cc->key_size); |
35058687 HX |
153 | desc.tfm = hash_tfm; |
154 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | |
155 | err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); | |
156 | crypto_free_hash(hash_tfm); | |
157 | ||
158 | if (err) { | |
159 | ti->error = "Error calculating hash in ESSIV"; | |
160 | return err; | |
161 | } | |
1da177e4 LT |
162 | |
163 | /* Setup the essiv_tfm with the given salt */ | |
d1806f6a HX |
164 | essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
165 | if (IS_ERR(essiv_tfm)) { | |
72d94861 | 166 | ti->error = "Error allocating crypto tfm for ESSIV"; |
1da177e4 | 167 | kfree(salt); |
d1806f6a | 168 | return PTR_ERR(essiv_tfm); |
1da177e4 | 169 | } |
d1806f6a HX |
170 | if (crypto_cipher_blocksize(essiv_tfm) != |
171 | crypto_blkcipher_ivsize(cc->tfm)) { | |
72d94861 | 172 | ti->error = "Block size of ESSIV cipher does " |
1da177e4 | 173 | "not match IV size of block cipher"; |
d1806f6a | 174 | crypto_free_cipher(essiv_tfm); |
1da177e4 LT |
175 | kfree(salt); |
176 | return -EINVAL; | |
177 | } | |
d1806f6a HX |
178 | err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); |
179 | if (err) { | |
72d94861 | 180 | ti->error = "Failed to set key for ESSIV cipher"; |
d1806f6a | 181 | crypto_free_cipher(essiv_tfm); |
1da177e4 | 182 | kfree(salt); |
d1806f6a | 183 | return err; |
1da177e4 LT |
184 | } |
185 | kfree(salt); | |
186 | ||
d1806f6a | 187 | cc->iv_gen_private = essiv_tfm; |
1da177e4 LT |
188 | return 0; |
189 | } | |
190 | ||
191 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) | |
192 | { | |
d1806f6a | 193 | crypto_free_cipher(cc->iv_gen_private); |
1da177e4 LT |
194 | cc->iv_gen_private = NULL; |
195 | } | |
196 | ||
197 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | |
198 | { | |
1da177e4 LT |
199 | memset(iv, 0, cc->iv_size); |
200 | *(u64 *)iv = cpu_to_le64(sector); | |
d1806f6a | 201 | crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv); |
1da177e4 LT |
202 | return 0; |
203 | } | |
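
/*
 * In short, ESSIV computes IV(sector) = E_{H(K)}(sector), where K is
 * the bulk key, H the digest named after "essiv:", and E the same
 * block cipher used for the data.  Identical plaintext sectors thus
 * get unpredictable, key-dependent IVs without storing anything on
 * disk.
 */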
204 | ||
205 | static struct crypt_iv_operations crypt_iv_plain_ops = { | |
206 | .generator = crypt_iv_plain_gen | |
207 | }; | |
208 | ||
209 | static struct crypt_iv_operations crypt_iv_essiv_ops = { | |
210 | .ctr = crypt_iv_essiv_ctr, | |
211 | .dtr = crypt_iv_essiv_dtr, | |
212 | .generator = crypt_iv_essiv_gen | |
213 | }; | |
214 | ||
215 | ||
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
			  struct scatterlist *in, unsigned int length,
			  int write, sector_t sector)
{
	u8 iv[cc->iv_size];
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}
247 | ||
248 | static void | |
249 | crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, | |
250 | struct bio *bio_out, struct bio *bio_in, | |
251 | sector_t sector, int write) | |
252 | { | |
253 | ctx->bio_in = bio_in; | |
254 | ctx->bio_out = bio_out; | |
255 | ctx->offset_in = 0; | |
256 | ctx->offset_out = 0; | |
257 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; | |
258 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; | |
259 | ctx->sector = sector + cc->iv_offset; | |
260 | ctx->write = write; | |
261 | } | |
262 | ||
263 | /* | |
264 | * Encrypt / decrypt data from one bio to another one (can be the same one) | |
265 | */ | |
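/*
 * Each 512-byte sector is converted independently with its own IV, so
 * any sector of the device can be read or rewritten in isolation; the
 * cost is one IV computation per sector.
 */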
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}
308 | ||
309 | /* | |
310 | * Generate a new unfragmented bio with the given size | |
311 | * This should never violate the device limitations | |
312 | * May return a smaller bio when running out of pages | |
313 | */ | |
314 | static struct bio * | |
315 | crypt_alloc_buffer(struct crypt_config *cc, unsigned int size, | |
316 | struct bio *base_bio, unsigned int *bio_vec_idx) | |
317 | { | |
318 | struct bio *bio; | |
319 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
b4e3ca1a | 320 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; |
1da177e4 LT |
321 | unsigned int i; |
322 | ||
323 | /* | |
bd53b714 NP |
324 | * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and |
325 | * to fail earlier. This is not necessary but increases throughput. | |
1da177e4 LT |
326 | * FIXME: Is this really intelligent? |
327 | */ | |
1da177e4 | 328 | if (base_bio) |
bd53b714 | 329 | bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC); |
1da177e4 | 330 | else |
bd53b714 NP |
331 | bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs); |
332 | if (!bio) | |
1da177e4 | 333 | return NULL; |
1da177e4 LT |
334 | |
335 | /* if the last bio was not complete, continue where that one ended */ | |
336 | bio->bi_idx = *bio_vec_idx; | |
337 | bio->bi_vcnt = *bio_vec_idx; | |
338 | bio->bi_size = 0; | |
339 | bio->bi_flags &= ~(1 << BIO_SEG_VALID); | |
340 | ||
341 | /* bio->bi_idx pages have already been allocated */ | |
342 | size -= bio->bi_idx * PAGE_SIZE; | |
343 | ||
344 | for(i = bio->bi_idx; i < nr_iovecs; i++) { | |
345 | struct bio_vec *bv = bio_iovec_idx(bio, i); | |
346 | ||
347 | bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask); | |
348 | if (!bv->bv_page) | |
349 | break; | |
350 | ||
351 | /* | |
352 | * if additional pages cannot be allocated without waiting, | |
353 | * return a partially allocated bio, the caller will then try | |
354 | * to allocate additional bios while submitting this partial bio | |
355 | */ | |
356 | if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1)) | |
357 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; | |
358 | ||
359 | bv->bv_offset = 0; | |
360 | if (size > PAGE_SIZE) | |
361 | bv->bv_len = PAGE_SIZE; | |
362 | else | |
363 | bv->bv_len = size; | |
364 | ||
365 | bio->bi_size += bv->bv_len; | |
366 | bio->bi_vcnt++; | |
367 | size -= bv->bv_len; | |
368 | } | |
369 | ||
1da177e4 LT |
370 | if (!bio->bi_size) { |
371 | bio_put(bio); | |
372 | return NULL; | |
373 | } | |
374 | ||
375 | /* | |
376 | * Remember the last bio_vec allocated to be able | |
377 | * to correctly continue after the splitting. | |
378 | */ | |
379 | *bio_vec_idx = bio->bi_vcnt; | |
380 | ||
381 | return bio; | |
382 | } | |
383 | ||
384 | static void crypt_free_buffer_pages(struct crypt_config *cc, | |
385 | struct bio *bio, unsigned int bytes) | |
386 | { | |
387 | unsigned int i, start, end; | |
388 | struct bio_vec *bv; | |
389 | ||
390 | /* | |
391 | * This is ugly, but Jens Axboe thinks that using bi_idx in the | |
392 | * endio function is too dangerous at the moment, so I calculate the | |
393 | * correct position using bi_vcnt and bi_size. | |
394 | * The bv_offset and bv_len fields might already be modified but we | |
395 | * know that we always allocated whole pages. | |
396 | * A fix to the bi_idx issue in the kernel is in the works, so | |
397 | * we will hopefully be able to revert to the cleaner solution soon. | |
398 | */ | |
399 | i = bio->bi_vcnt - 1; | |
400 | bv = bio_iovec_idx(bio, i); | |
401 | end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size; | |
402 | start = end - bytes; | |
403 | ||
404 | start >>= PAGE_SHIFT; | |
405 | if (!bio->bi_size) | |
406 | end = bio->bi_vcnt; | |
407 | else | |
408 | end >>= PAGE_SHIFT; | |
409 | ||
410 | for(i = start; i < end; i++) { | |
411 | bv = bio_iovec_idx(bio, i); | |
412 | BUG_ON(!bv->bv_page); | |
413 | mempool_free(bv->bv_page, cc->page_pool); | |
414 | bv->bv_page = NULL; | |
415 | } | |
416 | } | |
417 | ||
418 | /* | |
419 | * One of the bios was finished. Check for completion of | |
420 | * the whole request and correctly clean up the buffer. | |
421 | */ | |
422 | static void dec_pending(struct crypt_io *io, int error) | |
423 | { | |
424 | struct crypt_config *cc = (struct crypt_config *) io->target->private; | |
425 | ||
426 | if (error < 0) | |
427 | io->error = error; | |
428 | ||
429 | if (!atomic_dec_and_test(&io->pending)) | |
430 | return; | |
431 | ||
432 | if (io->first_clone) | |
433 | bio_put(io->first_clone); | |
434 | ||
435 | bio_endio(io->bio, io->bio->bi_size, io->error); | |
436 | ||
437 | mempool_free(io, cc->io_pool); | |
438 | } | |
439 | ||
440 | /* | |
441 | * kcryptd: | |
442 | * | |
443 | * Needed because it would be very unwise to do decryption in an | |
444 | * interrupt context, so bios returning from read requests get | |
445 | * queued here. | |
446 | */ | |
447 | static struct workqueue_struct *_kcryptd_workqueue; | |
448 | ||
449 | static void kcryptd_do_work(void *data) | |
450 | { | |
451 | struct crypt_io *io = (struct crypt_io *) data; | |
452 | struct crypt_config *cc = (struct crypt_config *) io->target->private; | |
453 | struct convert_context ctx; | |
454 | int r; | |
455 | ||
456 | crypt_convert_init(cc, &ctx, io->bio, io->bio, | |
457 | io->bio->bi_sector - io->target->begin, 0); | |
458 | r = crypt_convert(cc, &ctx); | |
459 | ||
460 | dec_pending(io, r); | |
461 | } | |
462 | ||
463 | static void kcryptd_queue_io(struct crypt_io *io) | |
464 | { | |
465 | INIT_WORK(&io->work, kcryptd_do_work, io); | |
466 | queue_work(_kcryptd_workqueue, &io->work); | |
467 | } | |
468 | ||
469 | /* | |
470 | * Decode key from its hex representation | |
471 | */ | |
472 | static int crypt_decode_key(u8 *key, char *hex, unsigned int size) | |
473 | { | |
474 | char buffer[3]; | |
475 | char *endp; | |
476 | unsigned int i; | |
477 | ||
478 | buffer[2] = '\0'; | |
479 | ||
480 | for(i = 0; i < size; i++) { | |
481 | buffer[0] = *hex++; | |
482 | buffer[1] = *hex++; | |
483 | ||
484 | key[i] = (u8)simple_strtoul(buffer, &endp, 16); | |
485 | ||
486 | if (endp != &buffer[2]) | |
487 | return -EINVAL; | |
488 | } | |
489 | ||
490 | if (*hex != '\0') | |
491 | return -EINVAL; | |
492 | ||
493 | return 0; | |
494 | } | |
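
/*
 * For example, crypt_decode_key(key, "deadbeef", 4) fills key[] with
 * 0xde 0xad 0xbe 0xef and returns 0; a string containing non-hex
 * characters or trailing digits beyond 2 * size fails with -EINVAL.
 */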
495 | ||
496 | /* | |
497 | * Encode key into its hex representation | |
498 | */ | |
499 | static void crypt_encode_key(char *hex, u8 *key, unsigned int size) | |
500 | { | |
501 | unsigned int i; | |
502 | ||
503 | for(i = 0; i < size; i++) { | |
504 | sprintf(hex, "%02x", *key); | |
505 | hex += 2; | |
506 | key++; | |
507 | } | |
508 | } | |
509 | ||
510 | /* | |
511 | * Construct an encryption mapping: | |
512 | * <cipher> <key> <iv_offset> <dev_path> <start> | |
513 | */ | |
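/*
 * An illustrative table line (hypothetical key and device):
 *
 *   0 204800 crypt aes-cbc-essiv:sha256 \
 *       0123456789abcdef0123456789abcdef 0 /dev/sdb 0
 *
 * i.e. cipher "aes", chain mode "cbc", IV mode "essiv" with sha256 as
 * the digest, a 128-bit key, iv_offset 0, starting at sector 0 of
 * /dev/sdb.  A key of "-" means an empty (zero-length) key.
 */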
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	cc->key_size = key_size;
	if ((!key_size && strcmp(argv[1], "-") != 0) ||
	    (key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) {
		ti->error = "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad1;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad1;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad4;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad5;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad5;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_blkcipher(tfm);
bad1:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}
682 | ||
683 | static void crypt_dtr(struct dm_target *ti) | |
684 | { | |
685 | struct crypt_config *cc = (struct crypt_config *) ti->private; | |
686 | ||
687 | mempool_destroy(cc->page_pool); | |
688 | mempool_destroy(cc->io_pool); | |
689 | ||
990a8baf | 690 | kfree(cc->iv_mode); |
1da177e4 LT |
691 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
692 | cc->iv_gen_ops->dtr(cc); | |
d1806f6a | 693 | crypto_free_blkcipher(cc->tfm); |
1da177e4 | 694 | dm_put_device(ti, cc->dev); |
9d3520a3 SR |
695 | |
696 | /* Must zero key material before freeing */ | |
697 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); | |
1da177e4 LT |
698 | kfree(cc); |
699 | } | |
700 | ||
701 | static int crypt_endio(struct bio *bio, unsigned int done, int error) | |
702 | { | |
703 | struct crypt_io *io = (struct crypt_io *) bio->bi_private; | |
704 | struct crypt_config *cc = (struct crypt_config *) io->target->private; | |
705 | ||
706 | if (bio_data_dir(bio) == WRITE) { | |
707 | /* | |
708 | * free the processed pages, even if | |
709 | * it's only a partially completed write | |
710 | */ | |
711 | crypt_free_buffer_pages(cc, bio, done); | |
712 | } | |
713 | ||
714 | if (bio->bi_size) | |
715 | return 1; | |
716 | ||
717 | bio_put(bio); | |
718 | ||
719 | /* | |
720 | * successful reads are decrypted by the worker thread | |
721 | */ | |
722 | if ((bio_data_dir(bio) == READ) | |
723 | && bio_flagged(bio, BIO_UPTODATE)) { | |
724 | kcryptd_queue_io(io); | |
725 | return 0; | |
726 | } | |
727 | ||
728 | dec_pending(io, error); | |
729 | return error; | |
730 | } | |
731 | ||
732 | static inline struct bio * | |
733 | crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio, | |
734 | sector_t sector, unsigned int *bvec_idx, | |
735 | struct convert_context *ctx) | |
736 | { | |
737 | struct bio *clone; | |
738 | ||
739 | if (bio_data_dir(bio) == WRITE) { | |
740 | clone = crypt_alloc_buffer(cc, bio->bi_size, | |
741 | io->first_clone, bvec_idx); | |
742 | if (clone) { | |
743 | ctx->bio_out = clone; | |
744 | if (crypt_convert(cc, ctx) < 0) { | |
745 | crypt_free_buffer_pages(cc, clone, | |
746 | clone->bi_size); | |
747 | bio_put(clone); | |
748 | return NULL; | |
749 | } | |
750 | } | |
751 | } else { | |
752 | /* | |
753 | * The block layer might modify the bvec array, so always | |
754 | * copy the required bvecs because we need the original | |
755 | * one in order to decrypt the whole bio data *afterwards*. | |
756 | */ | |
757 | clone = bio_alloc(GFP_NOIO, bio_segments(bio)); | |
758 | if (clone) { | |
759 | clone->bi_idx = 0; | |
760 | clone->bi_vcnt = bio_segments(bio); | |
761 | clone->bi_size = bio->bi_size; | |
762 | memcpy(clone->bi_io_vec, bio_iovec(bio), | |
763 | sizeof(struct bio_vec) * clone->bi_vcnt); | |
764 | } | |
765 | } | |
766 | ||
767 | if (!clone) | |
768 | return NULL; | |
769 | ||
770 | clone->bi_private = io; | |
771 | clone->bi_end_io = crypt_endio; | |
772 | clone->bi_bdev = cc->dev->bdev; | |
773 | clone->bi_sector = cc->start + sector; | |
774 | clone->bi_rw = bio->bi_rw; | |
775 | ||
776 | return clone; | |
777 | } | |
778 | ||
779 | static int crypt_map(struct dm_target *ti, struct bio *bio, | |
780 | union map_info *map_context) | |
781 | { | |
782 | struct crypt_config *cc = (struct crypt_config *) ti->private; | |
783 | struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO); | |
784 | struct convert_context ctx; | |
785 | struct bio *clone; | |
786 | unsigned int remaining = bio->bi_size; | |
787 | sector_t sector = bio->bi_sector - ti->begin; | |
788 | unsigned int bvec_idx = 0; | |
789 | ||
790 | io->target = ti; | |
791 | io->bio = bio; | |
792 | io->first_clone = NULL; | |
793 | io->error = 0; | |
794 | atomic_set(&io->pending, 1); /* hold a reference */ | |
795 | ||
796 | if (bio_data_dir(bio) == WRITE) | |
797 | crypt_convert_init(cc, &ctx, NULL, bio, sector, 1); | |
798 | ||
799 | /* | |
800 | * The allocated buffers can be smaller than the whole bio, | |
801 | * so repeat the whole process until all the data can be handled. | |
802 | */ | |
803 | while (remaining) { | |
804 | clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx); | |
805 | if (!clone) | |
806 | goto cleanup; | |
807 | ||
808 | if (!io->first_clone) { | |
809 | /* | |
810 | * hold a reference to the first clone, because it | |
811 | * holds the bio_vec array and that can't be freed | |
812 | * before all other clones are released | |
813 | */ | |
814 | bio_get(clone); | |
815 | io->first_clone = clone; | |
816 | } | |
817 | atomic_inc(&io->pending); | |
818 | ||
819 | remaining -= clone->bi_size; | |
820 | sector += bio_sectors(clone); | |
821 | ||
822 | generic_make_request(clone); | |
823 | ||
824 | /* out of memory -> run queues */ | |
825 | if (remaining) | |
826 | blk_congestion_wait(bio_data_dir(clone), HZ/100); | |
827 | } | |
828 | ||
829 | /* drop reference, clones could have returned before we reach this */ | |
830 | dec_pending(io, 0); | |
831 | return 0; | |
832 | ||
833 | cleanup: | |
834 | if (io->first_clone) { | |
835 | dec_pending(io, -ENOMEM); | |
836 | return 0; | |
837 | } | |
838 | ||
839 | /* if no bio has been dispatched yet, we can directly return the error */ | |
840 | mempool_free(io, cc->io_pool); | |
841 | return -ENOMEM; | |
842 | } | |
843 | ||
844 | static int crypt_status(struct dm_target *ti, status_type_t type, | |
845 | char *result, unsigned int maxlen) | |
846 | { | |
847 | struct crypt_config *cc = (struct crypt_config *) ti->private; | |
848 | const char *cipher; | |
849 | const char *chainmode = NULL; | |
850 | unsigned int sz = 0; | |
851 | ||
852 | switch (type) { | |
853 | case STATUSTYPE_INFO: | |
854 | result[0] = '\0'; | |
855 | break; | |
856 | ||
857 | case STATUSTYPE_TABLE: | |
d1806f6a | 858 | cipher = crypto_blkcipher_name(cc->tfm); |
1da177e4 | 859 | |
d1806f6a | 860 | chainmode = cc->chainmode; |
1da177e4 LT |
861 | |
862 | if (cc->iv_mode) | |
863 | DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode); | |
864 | else | |
865 | DMEMIT("%s-%s ", cipher, chainmode); | |
866 | ||
867 | if (cc->key_size > 0) { | |
868 | if ((maxlen - sz) < ((cc->key_size << 1) + 1)) | |
869 | return -ENOMEM; | |
870 | ||
871 | crypt_encode_key(result + sz, cc->key, cc->key_size); | |
872 | sz += cc->key_size << 1; | |
873 | } else { | |
874 | if (sz >= maxlen) | |
875 | return -ENOMEM; | |
876 | result[sz++] = '-'; | |
877 | } | |
878 | ||
4ee218cd AM |
879 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, |
880 | cc->dev->name, (unsigned long long)cc->start); | |
1da177e4 LT |
881 | break; |
882 | } | |
883 | return 0; | |
884 | } | |
885 | ||
886 | static struct target_type crypt_target = { | |
887 | .name = "crypt", | |
888 | .version= {1, 1, 0}, | |
889 | .module = THIS_MODULE, | |
890 | .ctr = crypt_ctr, | |
891 | .dtr = crypt_dtr, | |
892 | .map = crypt_map, | |
893 | .status = crypt_status, | |
894 | }; | |
895 | ||
896 | static int __init dm_crypt_init(void) | |
897 | { | |
898 | int r; | |
899 | ||
900 | _crypt_io_pool = kmem_cache_create("dm-crypt_io", | |
901 | sizeof(struct crypt_io), | |
902 | 0, 0, NULL, NULL); | |
903 | if (!_crypt_io_pool) | |
904 | return -ENOMEM; | |
905 | ||
906 | _kcryptd_workqueue = create_workqueue("kcryptd"); | |
907 | if (!_kcryptd_workqueue) { | |
908 | r = -ENOMEM; | |
72d94861 | 909 | DMERR("couldn't create kcryptd"); |
1da177e4 LT |
910 | goto bad1; |
911 | } | |
912 | ||
913 | r = dm_register_target(&crypt_target); | |
914 | if (r < 0) { | |
72d94861 | 915 | DMERR("register failed %d", r); |
1da177e4 LT |
916 | goto bad2; |
917 | } | |
918 | ||
919 | return 0; | |
920 | ||
921 | bad2: | |
922 | destroy_workqueue(_kcryptd_workqueue); | |
923 | bad1: | |
924 | kmem_cache_destroy(_crypt_io_pool); | |
925 | return r; | |
926 | } | |
927 | ||
928 | static void __exit dm_crypt_exit(void) | |
929 | { | |
930 | int r = dm_unregister_target(&crypt_target); | |
931 | ||
932 | if (r < 0) | |
72d94861 | 933 | DMERR("unregister failed %d", r); |
1da177e4 LT |
934 | |
935 | destroy_workqueue(_kcryptd_workqueue); | |
936 | kmem_cache_destroy(_crypt_io_pool); | |
937 | } | |
938 | ||
939 | module_init(dm_crypt_init); | |
940 | module_exit(dm_crypt_exit); | |
941 | ||
942 | MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); | |
943 | MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); | |
944 | MODULE_LICENSE("GPL"); |