Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> | |
3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> | |
542da317 | 4 | * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. |
1da177e4 LT |
5 | * |
6 | * This file is released under the GPL. | |
7 | */ | |
8 | ||
43d69034 | 9 | #include <linux/completion.h> |
d1806f6a | 10 | #include <linux/err.h> |
1da177e4 LT |
11 | #include <linux/module.h> |
12 | #include <linux/init.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/bio.h> | |
15 | #include <linux/blkdev.h> | |
16 | #include <linux/mempool.h> | |
17 | #include <linux/slab.h> | |
18 | #include <linux/crypto.h> | |
19 | #include <linux/workqueue.h> | |
3fcfab16 | 20 | #include <linux/backing-dev.h> |
c0297721 | 21 | #include <linux/percpu.h> |
1da177e4 | 22 | #include <asm/atomic.h> |
378f058c | 23 | #include <linux/scatterlist.h> |
1da177e4 | 24 | #include <asm/page.h> |
48527fa7 | 25 | #include <asm/unaligned.h> |
1da177e4 | 26 | |
586e80e6 | 27 | #include <linux/device-mapper.h> |
1da177e4 | 28 | |
72d94861 | 29 | #define DM_MSG_PREFIX "crypt" |
e48d4bbf | 30 | #define MESG_STR(x) x, sizeof(x) |
1da177e4 | 31 | |
1da177e4 LT |
32 | /* |
33 | * context holding the current state of a multi-part conversion | |
34 | */ | |
35 | struct convert_context { | |
43d69034 | 36 | struct completion restart; |
1da177e4 LT |
37 | struct bio *bio_in; |
38 | struct bio *bio_out; | |
39 | unsigned int offset_in; | |
40 | unsigned int offset_out; | |
41 | unsigned int idx_in; | |
42 | unsigned int idx_out; | |
43 | sector_t sector; | |
43d69034 | 44 | atomic_t pending; |
1da177e4 LT |
45 | }; |
46 | ||
53017030 MB |
47 | /* |
48 | * per bio private data | |
49 | */ | |
50 | struct dm_crypt_io { | |
51 | struct dm_target *target; | |
52 | struct bio *base_bio; | |
53 | struct work_struct work; | |
54 | ||
55 | struct convert_context ctx; | |
56 | ||
57 | atomic_t pending; | |
58 | int error; | |
0c395b0f | 59 | sector_t sector; |
393b47ef | 60 | struct dm_crypt_io *base_io; |
53017030 MB |
61 | }; |
62 | ||
01482b76 | 63 | struct dm_crypt_request { |
b2174eeb | 64 | struct convert_context *ctx; |
01482b76 MB |
65 | struct scatterlist sg_in; |
66 | struct scatterlist sg_out; | |
2dc5327d | 67 | sector_t iv_sector; |
01482b76 MB |
68 | }; |
69 | ||
1da177e4 LT |
70 | struct crypt_config; |
71 | ||
72 | struct crypt_iv_operations { | |
73 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, | |
d469f841 | 74 | const char *opts); |
1da177e4 | 75 | void (*dtr)(struct crypt_config *cc); |
b95bf2d3 | 76 | int (*init)(struct crypt_config *cc); |
542da317 | 77 | int (*wipe)(struct crypt_config *cc); |
2dc5327d MB |
78 | int (*generator)(struct crypt_config *cc, u8 *iv, |
79 | struct dm_crypt_request *dmreq); | |
80 | int (*post)(struct crypt_config *cc, u8 *iv, | |
81 | struct dm_crypt_request *dmreq); | |
1da177e4 LT |
82 | }; |
83 | ||
60473592 | 84 | struct iv_essiv_private { |
b95bf2d3 MB |
85 | struct crypto_hash *hash_tfm; |
86 | u8 *salt; | |
60473592 MB |
87 | }; |
88 | ||
89 | struct iv_benbi_private { | |
90 | int shift; | |
91 | }; | |
92 | ||
1da177e4 LT |
93 | /* |
94 | * Crypt: maps a linear range of a block device | |
95 | * and encrypts / decrypts at the same time. | |
96 | */ | |
e48d4bbf | 97 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; |
c0297721 AK |
98 | |
99 | /* | |
100 | * Duplicated per-CPU state for cipher. | |
101 | */ | |
102 | struct crypt_cpu { | |
103 | struct ablkcipher_request *req; | |
104 | struct crypto_ablkcipher *tfm; | |
105 | ||
106 | /* ESSIV: struct crypto_cipher *essiv_tfm */ | |
107 | void *iv_private; | |
108 | }; | |
109 | ||
110 | /* | |
111 | * The fields in here must be read-only after initialization;
112 | * changing state should be in crypt_cpu.
113 | */ | |
1da177e4 LT |
114 | struct crypt_config { |
115 | struct dm_dev *dev; | |
116 | sector_t start; | |
117 | ||
118 | /* | |
ddd42edf MB |
119 | * pool for per bio private data, crypto requests and |
120 | * encryption requests/buffer pages
1da177e4 LT |
121 | */ |
122 | mempool_t *io_pool; | |
ddd42edf | 123 | mempool_t *req_pool; |
1da177e4 | 124 | mempool_t *page_pool; |
6a24c718 | 125 | struct bio_set *bs; |
1da177e4 | 126 | |
cabf08e4 MB |
127 | struct workqueue_struct *io_queue; |
128 | struct workqueue_struct *crypt_queue; | |
3f1e9070 | 129 | |
5ebaee6d | 130 | char *cipher; |
7dbcd137 | 131 | char *cipher_string; |
5ebaee6d | 132 | |
1da177e4 | 133 | struct crypt_iv_operations *iv_gen_ops; |
79066ad3 | 134 | union { |
60473592 MB |
135 | struct iv_essiv_private essiv; |
136 | struct iv_benbi_private benbi; | |
79066ad3 | 137 | } iv_gen_private; |
1da177e4 LT |
138 | sector_t iv_offset; |
139 | unsigned int iv_size; | |
140 | ||
c0297721 AK |
141 | /* |
142 | * Duplicated per cpu state. Access through | |
143 | * per_cpu_ptr() only. | |
144 | */ | |
145 | struct crypt_cpu __percpu *cpu; | |
146 | ||
ddd42edf MB |
147 | /* |
148 | * Layout of each crypto request: | |
149 | * | |
150 | * struct ablkcipher_request | |
151 | * context | |
152 | * padding | |
153 | * struct dm_crypt_request | |
154 | * padding | |
155 | * IV | |
156 | * | |
157 | * The padding is added so that dm_crypt_request and the IV are | |
158 | * correctly aligned. | |
159 | */ | |
160 | unsigned int dmreq_start; | |
ddd42edf | 161 | |
e48d4bbf | 162 | unsigned long flags; |
1da177e4 LT |
163 | unsigned int key_size; |
164 | u8 key[0]; | |
165 | }; | |
166 | ||
6a24c718 | 167 | #define MIN_IOS 16 |
1da177e4 LT |
168 | #define MIN_POOL_PAGES 32 |
169 | #define MIN_BIO_PAGES 8 | |
170 | ||
e18b890b | 171 | static struct kmem_cache *_crypt_io_pool; |
1da177e4 | 172 | |
028867ac | 173 | static void clone_init(struct dm_crypt_io *, struct bio *); |
395b167c | 174 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
2dc5327d | 175 | static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); |
027581f3 | 176 | |
c0297721 AK |
177 | static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) |
178 | { | |
179 | return this_cpu_ptr(cc->cpu); | |
180 | } | |
181 | ||
182 | /* | |
183 | * Use this to access cipher attributes that are the same for each CPU. | |
184 | */ | |
185 | static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc) | |
186 | { | |
187 | return __this_cpu_ptr(cc->cpu)->tfm; | |
188 | } | |
189 | ||
1da177e4 LT |
190 | /* |
191 | * Different IV generation algorithms: | |
192 | * | |
3c164bd8 | 193 | * plain: the initial vector is the 32-bit little-endian version of the sector |
3a4fa0a2 | 194 | * number, padded with zeros if necessary. |
1da177e4 | 195 | * |
61afef61 MB |
196 | * plain64: the initial vector is the 64-bit little-endian version of the sector |
197 | * number, padded with zeros if necessary. | |
198 | * | |
3c164bd8 RS |
199 | * essiv: "encrypted sector|salt initial vector", the sector number is |
200 | * encrypted with the bulk cipher using a salt as key. The salt | |
201 | * should be derived from the bulk cipher's key via hashing. | |
1da177e4 | 202 | * |
48527fa7 RS |
203 | * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 |
204 | * (needed for LRW-32-AES and possibly other narrow block modes)
205 | * | |
46b47730 LN |
206 | * null: the initial vector is always zero. Provides compatibility with |
207 | * obsolete loop_fish2 devices. Do not use for new devices. | |
208 | * | |
1da177e4 LT |
209 | * plumb: unimplemented, see: |
210 | * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 | |
211 | */ | |
212 | ||
2dc5327d MB |
213 | static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, |
214 | struct dm_crypt_request *dmreq) | |
1da177e4 LT |
215 | { |
216 | memset(iv, 0, cc->iv_size); | |
2dc5327d | 217 | *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); |
1da177e4 LT |
218 | |
219 | return 0; | |
220 | } | |
221 | ||
61afef61 | 222 | static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, |
2dc5327d | 223 | struct dm_crypt_request *dmreq) |
61afef61 MB |
224 | { |
225 | memset(iv, 0, cc->iv_size); | |
2dc5327d | 226 | *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); |
61afef61 MB |
227 | |
228 | return 0; | |
229 | } | |
230 | ||
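Restated outside the kernel, the two generators above are just explicit little-endian stores into a zeroed IV. A minimal user-space sketch, with hypothetical helper names and a 16-byte IV assumed (as with AES):

#include <stdint.h>
#include <string.h>

/* crypt_iv_plain_gen(): low 32 bits of the sector, little-endian. */
static void plain_iv(uint8_t *iv, unsigned iv_size, uint64_t sector)
{
	memset(iv, 0, iv_size);
	for (unsigned i = 0; i < 4; i++)
		iv[i] = (uint8_t)(sector >> (8 * i));
}

/* crypt_iv_plain64_gen(): the full 64-bit sector, little-endian. */
static void plain64_iv(uint8_t *iv, unsigned iv_size, uint64_t sector)
{
	memset(iv, 0, iv_size);
	for (unsigned i = 0; i < 8; i++)
		iv[i] = (uint8_t)(sector >> (8 * i));
}

The two only differ beyond 2^32 sectors (2 TiB at 512-byte sectors), where plain wraps and plain64 does not.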
b95bf2d3 MB |
231 | /* Initialise ESSIV - compute salt but no local memory allocations */ |
232 | static int crypt_iv_essiv_init(struct crypt_config *cc) | |
233 | { | |
234 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; | |
235 | struct hash_desc desc; | |
236 | struct scatterlist sg; | |
c0297721 AK |
237 | struct crypto_cipher *essiv_tfm; |
238 | int err, cpu; | |
b95bf2d3 MB |
239 | |
240 | sg_init_one(&sg, cc->key, cc->key_size); | |
241 | desc.tfm = essiv->hash_tfm; | |
242 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | |
243 | ||
244 | err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); | |
245 | if (err) | |
246 | return err; | |
247 | ||
c0297721 AK |
248 | for_each_possible_cpu(cpu) { |
249 | essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
250 | ||
251 | err = crypto_cipher_setkey(essiv_tfm, essiv->salt, | |
b95bf2d3 | 252 | crypto_hash_digestsize(essiv->hash_tfm)); |
c0297721 AK |
253 | if (err) |
254 | return err; | |
255 | } | |
256 | ||
257 | return 0; | |
b95bf2d3 MB |
258 | } |
259 | ||
542da317 MB |
260 | /* Wipe salt and reset key derived from volume key */ |
261 | static int crypt_iv_essiv_wipe(struct crypt_config *cc) | |
262 | { | |
263 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; | |
264 | unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); | |
c0297721 AK |
265 | struct crypto_cipher *essiv_tfm; |
266 | int cpu, r, err = 0; | |
542da317 MB |
267 | |
268 | memset(essiv->salt, 0, salt_size); | |
269 | ||
c0297721 AK |
270 | for_each_possible_cpu(cpu) { |
271 | essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private; | |
272 | r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); | |
273 | if (r) | |
274 | err = r; | |
275 | } | |
276 | ||
277 | return err; | |
278 | } | |
279 | ||
280 | /* Set up per cpu cipher state */ | |
281 | static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, | |
282 | struct dm_target *ti, | |
283 | u8 *salt, unsigned saltsize) | |
284 | { | |
285 | struct crypto_cipher *essiv_tfm; | |
286 | int err; | |
287 | ||
288 | /* Setup the essiv_tfm with the given salt */ | |
289 | essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); | |
290 | if (IS_ERR(essiv_tfm)) { | |
291 | ti->error = "Error allocating crypto tfm for ESSIV"; | |
292 | return essiv_tfm; | |
293 | } | |
294 | ||
295 | if (crypto_cipher_blocksize(essiv_tfm) != | |
296 | crypto_ablkcipher_ivsize(any_tfm(cc))) { | |
297 | ti->error = "Block size of ESSIV cipher does " | |
298 | "not match IV size of block cipher"; | |
299 | crypto_free_cipher(essiv_tfm); | |
300 | return ERR_PTR(-EINVAL); | |
301 | } | |
302 | ||
303 | err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); | |
304 | if (err) { | |
305 | ti->error = "Failed to set key for ESSIV cipher"; | |
306 | crypto_free_cipher(essiv_tfm); | |
307 | return ERR_PTR(err); | |
308 | } | |
309 | ||
310 | return essiv_tfm; | |
542da317 MB |
311 | } |
312 | ||
60473592 MB |
313 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) |
314 | { | |
c0297721 AK |
315 | int cpu; |
316 | struct crypt_cpu *cpu_cc; | |
317 | struct crypto_cipher *essiv_tfm; | |
60473592 MB |
318 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
319 | ||
b95bf2d3 MB |
320 | crypto_free_hash(essiv->hash_tfm); |
321 | essiv->hash_tfm = NULL; | |
322 | ||
323 | kzfree(essiv->salt); | |
324 | essiv->salt = NULL; | |
c0297721 AK |
325 | |
326 | for_each_possible_cpu(cpu) { | |
327 | cpu_cc = per_cpu_ptr(cc->cpu, cpu); | |
328 | essiv_tfm = cpu_cc->iv_private; | |
329 | ||
330 | if (essiv_tfm) | |
331 | crypto_free_cipher(essiv_tfm); | |
332 | ||
333 | cpu_cc->iv_private = NULL; | |
334 | } | |
60473592 MB |
335 | } |
336 | ||
1da177e4 | 337 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, |
d469f841 | 338 | const char *opts) |
1da177e4 | 339 | { |
5861f1be MB |
340 | struct crypto_cipher *essiv_tfm = NULL; |
341 | struct crypto_hash *hash_tfm = NULL; | |
5861f1be | 342 | u8 *salt = NULL; |
c0297721 | 343 | int err, cpu; |
1da177e4 | 344 | |
5861f1be | 345 | if (!opts) { |
72d94861 | 346 | ti->error = "Digest algorithm missing for ESSIV mode"; |
1da177e4 LT |
347 | return -EINVAL; |
348 | } | |
349 | ||
b95bf2d3 | 350 | /* Allocate hash algorithm */ |
35058687 HX |
351 | hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); |
352 | if (IS_ERR(hash_tfm)) { | |
72d94861 | 353 | ti->error = "Error initializing ESSIV hash"; |
5861f1be MB |
354 | err = PTR_ERR(hash_tfm); |
355 | goto bad; | |
1da177e4 LT |
356 | } |
357 | ||
b95bf2d3 | 358 | salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); |
5861f1be | 359 | if (!salt) { |
72d94861 | 360 | ti->error = "Error kmallocing salt storage in ESSIV"; |
5861f1be MB |
361 | err = -ENOMEM; |
362 | goto bad; | |
1da177e4 LT |
363 | } |
364 | ||
b95bf2d3 | 365 | cc->iv_gen_private.essiv.salt = salt; |
b95bf2d3 MB |
366 | cc->iv_gen_private.essiv.hash_tfm = hash_tfm; |
367 | ||
c0297721 AK |
368 | for_each_possible_cpu(cpu) { |
369 | essiv_tfm = setup_essiv_cpu(cc, ti, salt, | |
370 | crypto_hash_digestsize(hash_tfm)); | |
371 | if (IS_ERR(essiv_tfm)) { | |
372 | crypt_iv_essiv_dtr(cc); | |
373 | return PTR_ERR(essiv_tfm); | |
374 | } | |
375 | per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm; | |
376 | } | |
377 | ||
1da177e4 | 378 | return 0; |
5861f1be MB |
379 | |
380 | bad: | |
5861f1be MB |
381 | if (hash_tfm && !IS_ERR(hash_tfm)) |
382 | crypto_free_hash(hash_tfm); | |
b95bf2d3 | 383 | kfree(salt); |
5861f1be | 384 | return err; |
1da177e4 LT |
385 | } |
386 | ||
2dc5327d MB |
387 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, |
388 | struct dm_crypt_request *dmreq) | |
1da177e4 | 389 | { |
c0297721 AK |
390 | struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; |
391 | ||
1da177e4 | 392 | memset(iv, 0, cc->iv_size); |
2dc5327d | 393 | *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); |
c0297721 AK |
394 | crypto_cipher_encrypt_one(essiv_tfm, iv, iv); |
395 | ||
1da177e4 LT |
396 | return 0; |
397 | } | |
398 | ||
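Read together, the ESSIV pieces amount to: salt = H(volume key), then IV = E_salt(le64(sector)). A user-space sketch of the common aes-cbc-essiv:sha256 case, assuming OpenSSL's low-level AES_*/SHA256 interfaces; an illustration only, not the kernel path:

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>
#include <openssl/sha.h>

/* salt = SHA256(key), as in crypt_iv_essiv_init(); the salt keys the
 * ESSIV cipher, as in setup_essiv_cpu(); IV = AES_salt(le64(sector)),
 * as in crypt_iv_essiv_gen(). Error checking omitted. */
static void essiv_iv(uint8_t iv[16], const uint8_t *key, size_t key_len,
		     uint64_t sector)
{
	uint8_t salt[SHA256_DIGEST_LENGTH];
	AES_KEY essiv_key;

	SHA256(key, key_len, salt);
	AES_set_encrypt_key(salt, 8 * SHA256_DIGEST_LENGTH, &essiv_key);

	memset(iv, 0, 16);
	for (unsigned i = 0; i < 8; i++)	/* cpu_to_le64(sector) */
		iv[i] = (uint8_t)(sector >> (8 * i));
	AES_encrypt(iv, iv, &essiv_key);
}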
48527fa7 RS |
399 | static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, |
400 | const char *opts) | |
401 | { | |
c0297721 | 402 | unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc)); |
f0d1b0b3 | 403 | int log = ilog2(bs); |
48527fa7 RS |
404 | |
405 | /* we need to calculate how far we must shift the sector count
406 | * to get the cipher block count; we use this shift in _gen */
407 | ||
408 | if (1 << log != bs) { | |
409 | ti->error = "cipher blocksize is not a power of 2";
410 | return -EINVAL; | |
411 | } | |
412 | ||
413 | if (log > 9) { | |
414 | ti->error = "cipher blocksize is > 512";
415 | return -EINVAL; | |
416 | } | |
417 | ||
60473592 | 418 | cc->iv_gen_private.benbi.shift = 9 - log; |
48527fa7 RS |
419 | |
420 | return 0; | |
421 | } | |
422 | ||
423 | static void crypt_iv_benbi_dtr(struct crypt_config *cc) | |
424 | { | |
48527fa7 RS |
425 | } |
426 | ||
2dc5327d MB |
427 | static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, |
428 | struct dm_crypt_request *dmreq) | |
48527fa7 | 429 | { |
79066ad3 HX |
430 | __be64 val; |
431 | ||
48527fa7 | 432 | memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ |
79066ad3 | 433 | |
2dc5327d | 434 | val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); |
79066ad3 | 435 | put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); |
48527fa7 | 436 | |
1da177e4 LT |
437 | return 0; |
438 | } | |
439 | ||
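As a worked check of the benbi arithmetic above: a 16-byte cipher block gives log = 4 and shift = 9 - 4 = 5, so sector 7 becomes narrow-block count (7 << 5) + 1 = 225, stored big-endian in the last eight IV bytes. A self-contained sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned bs = 16;			/* e.g. LRW-AES block size */
	int log = 4;				/* ilog2(16) */
	int shift = 9 - log;			/* as in crypt_iv_benbi_ctr() */
	uint64_t count = (UINT64_C(7) << shift) + 1; /* crypt_iv_benbi_gen() */
	uint8_t iv[16] = { 0 };

	for (int i = 0; i < 8; i++)	/* put_unaligned(cpu_to_be64(count)) */
		iv[8 + i] = (uint8_t)(count >> (8 * (7 - i)));

	assert((1u << log) == bs && shift == 5 && count == 225);
	assert(iv[14] == 0 && iv[15] == 225);
	return 0;
}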
2dc5327d MB |
440 | static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, |
441 | struct dm_crypt_request *dmreq) | |
46b47730 LN |
442 | { |
443 | memset(iv, 0, cc->iv_size); | |
444 | ||
445 | return 0; | |
446 | } | |
447 | ||
1da177e4 LT |
448 | static struct crypt_iv_operations crypt_iv_plain_ops = { |
449 | .generator = crypt_iv_plain_gen | |
450 | }; | |
451 | ||
61afef61 MB |
452 | static struct crypt_iv_operations crypt_iv_plain64_ops = { |
453 | .generator = crypt_iv_plain64_gen | |
454 | }; | |
455 | ||
1da177e4 LT |
456 | static struct crypt_iv_operations crypt_iv_essiv_ops = { |
457 | .ctr = crypt_iv_essiv_ctr, | |
458 | .dtr = crypt_iv_essiv_dtr, | |
b95bf2d3 | 459 | .init = crypt_iv_essiv_init, |
542da317 | 460 | .wipe = crypt_iv_essiv_wipe, |
1da177e4 LT |
461 | .generator = crypt_iv_essiv_gen |
462 | }; | |
463 | ||
48527fa7 RS |
464 | static struct crypt_iv_operations crypt_iv_benbi_ops = { |
465 | .ctr = crypt_iv_benbi_ctr, | |
466 | .dtr = crypt_iv_benbi_dtr, | |
467 | .generator = crypt_iv_benbi_gen | |
468 | }; | |
1da177e4 | 469 | |
46b47730 LN |
470 | static struct crypt_iv_operations crypt_iv_null_ops = { |
471 | .generator = crypt_iv_null_gen | |
472 | }; | |
473 | ||
d469f841 MB |
474 | static void crypt_convert_init(struct crypt_config *cc, |
475 | struct convert_context *ctx, | |
476 | struct bio *bio_out, struct bio *bio_in, | |
fcd369da | 477 | sector_t sector) |
1da177e4 LT |
478 | { |
479 | ctx->bio_in = bio_in; | |
480 | ctx->bio_out = bio_out; | |
481 | ctx->offset_in = 0; | |
482 | ctx->offset_out = 0; | |
483 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; | |
484 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; | |
485 | ctx->sector = sector + cc->iv_offset; | |
43d69034 | 486 | init_completion(&ctx->restart); |
1da177e4 LT |
487 | } |
488 | ||
b2174eeb HY |
489 | static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, |
490 | struct ablkcipher_request *req) | |
491 | { | |
492 | return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); | |
493 | } | |
494 | ||
495 | static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc, | |
496 | struct dm_crypt_request *dmreq) | |
497 | { | |
498 | return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start); | |
499 | } | |
500 | ||
2dc5327d MB |
501 | static u8 *iv_of_dmreq(struct crypt_config *cc, |
502 | struct dm_crypt_request *dmreq) | |
503 | { | |
504 | return (u8 *)ALIGN((unsigned long)(dmreq + 1), | |
505 | crypto_ablkcipher_alignmask(any_tfm(cc)) + 1); | |
506 | } | |
507 | ||
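The three helpers above are the request layout documented in struct crypt_config, expressed as pointer arithmetic. A simplified user-space sketch of the same offset math; the sizes and masks are invented stand-ins for the crypto API queries made in crypt_ctr():

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t req_size = 160;   /* request struct + tfm context stand-in */
	uint64_t ctx_align = 8;    /* crypto_tfm_ctx_alignment() stand-in */
	uint64_t iv_mask = 15;     /* crypto_ablkcipher_alignmask() stand-in */
	uint64_t dmreq_size = 48;  /* sizeof(struct dm_crypt_request) stand-in */

	/* cc->dmreq_start (simplified): dm_crypt_request starts here. */
	uint64_t dmreq_off = ALIGN_UP(req_size, ctx_align);
	/* iv_of_dmreq(): the IV follows, aligned to alignmask + 1. */
	uint64_t iv_off = ALIGN_UP(dmreq_off + dmreq_size, iv_mask + 1);

	printf("dmreq at +%llu, iv at +%llu\n",
	       (unsigned long long)dmreq_off, (unsigned long long)iv_off);
	return 0;
}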
01482b76 | 508 | static int crypt_convert_block(struct crypt_config *cc, |
3a7f6c99 MB |
509 | struct convert_context *ctx, |
510 | struct ablkcipher_request *req) | |
01482b76 MB |
511 | { |
512 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); | |
513 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); | |
3a7f6c99 MB |
514 | struct dm_crypt_request *dmreq; |
515 | u8 *iv; | |
516 | int r = 0; | |
517 | ||
b2174eeb | 518 | dmreq = dmreq_of_req(cc, req); |
2dc5327d | 519 | iv = iv_of_dmreq(cc, dmreq); |
01482b76 | 520 | |
2dc5327d | 521 | dmreq->iv_sector = ctx->sector; |
b2174eeb | 522 | dmreq->ctx = ctx; |
3a7f6c99 MB |
523 | sg_init_table(&dmreq->sg_in, 1); |
524 | sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, | |
01482b76 MB |
525 | bv_in->bv_offset + ctx->offset_in); |
526 | ||
3a7f6c99 MB |
527 | sg_init_table(&dmreq->sg_out, 1); |
528 | sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, | |
01482b76 MB |
529 | bv_out->bv_offset + ctx->offset_out); |
530 | ||
531 | ctx->offset_in += 1 << SECTOR_SHIFT; | |
532 | if (ctx->offset_in >= bv_in->bv_len) { | |
533 | ctx->offset_in = 0; | |
534 | ctx->idx_in++; | |
535 | } | |
536 | ||
537 | ctx->offset_out += 1 << SECTOR_SHIFT; | |
538 | if (ctx->offset_out >= bv_out->bv_len) { | |
539 | ctx->offset_out = 0; | |
540 | ctx->idx_out++; | |
541 | } | |
542 | ||
3a7f6c99 | 543 | if (cc->iv_gen_ops) { |
2dc5327d | 544 | r = cc->iv_gen_ops->generator(cc, iv, dmreq); |
3a7f6c99 MB |
545 | if (r < 0) |
546 | return r; | |
547 | } | |
548 | ||
549 | ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, | |
550 | 1 << SECTOR_SHIFT, iv); | |
551 | ||
552 | if (bio_data_dir(ctx->bio_in) == WRITE) | |
553 | r = crypto_ablkcipher_encrypt(req); | |
554 | else | |
555 | r = crypto_ablkcipher_decrypt(req); | |
556 | ||
2dc5327d MB |
557 | if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) |
558 | r = cc->iv_gen_ops->post(cc, iv, dmreq); | |
559 | ||
3a7f6c99 | 560 | return r; |
01482b76 MB |
561 | } |
562 | ||
95497a96 MB |
563 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
564 | int error); | |
c0297721 | 565 | |
ddd42edf MB |
566 | static void crypt_alloc_req(struct crypt_config *cc, |
567 | struct convert_context *ctx) | |
568 | { | |
c0297721 AK |
569 | struct crypt_cpu *this_cc = this_crypt_config(cc); |
570 | ||
571 | if (!this_cc->req) | |
572 | this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); | |
573 | ||
574 | ablkcipher_request_set_tfm(this_cc->req, this_cc->tfm); | |
575 | ablkcipher_request_set_callback(this_cc->req, | |
576 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | |
577 | kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); | |
ddd42edf MB |
578 | } |
579 | ||
1da177e4 LT |
580 | /* |
581 | * Encrypt / decrypt data from one bio to another one (can be the same one) | |
582 | */ | |
583 | static int crypt_convert(struct crypt_config *cc, | |
d469f841 | 584 | struct convert_context *ctx) |
1da177e4 | 585 | { |
c0297721 | 586 | struct crypt_cpu *this_cc = this_crypt_config(cc); |
3f1e9070 | 587 | int r; |
1da177e4 | 588 | |
c8081618 MB |
589 | atomic_set(&ctx->pending, 1); |
590 | ||
1da177e4 LT |
591 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && |
592 | ctx->idx_out < ctx->bio_out->bi_vcnt) { | |
1da177e4 | 593 | |
3a7f6c99 MB |
594 | crypt_alloc_req(cc, ctx); |
595 | ||
3f1e9070 MB |
596 | atomic_inc(&ctx->pending); |
597 | ||
c0297721 | 598 | r = crypt_convert_block(cc, ctx, this_cc->req); |
3a7f6c99 MB |
599 | |
600 | switch (r) { | |
3f1e9070 | 601 | /* async */ |
3a7f6c99 MB |
602 | case -EBUSY: |
603 | wait_for_completion(&ctx->restart); | |
604 | INIT_COMPLETION(ctx->restart); | |
605 | /* fall through */
606 | case -EINPROGRESS: | |
c0297721 | 607 | this_cc->req = NULL; |
3f1e9070 MB |
608 | ctx->sector++; |
609 | continue; | |
610 | ||
611 | /* sync */ | |
3a7f6c99 | 612 | case 0: |
3f1e9070 | 613 | atomic_dec(&ctx->pending); |
3a7f6c99 | 614 | ctx->sector++; |
c7f1b204 | 615 | cond_resched(); |
3a7f6c99 | 616 | continue; |
3a7f6c99 | 617 | |
3f1e9070 MB |
618 | /* error */ |
619 | default: | |
620 | atomic_dec(&ctx->pending); | |
621 | return r; | |
622 | } | |
1da177e4 LT |
623 | } |
624 | ||
3f1e9070 | 625 | return 0; |
1da177e4 LT |
626 | } |
627 | ||
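One subtlety in the switch above deserves spelling out:

/*
 * crypto_ablkcipher_encrypt/decrypt() return -EBUSY when the request
 * was accepted onto the backlog: the loop must then wait until
 * kcryptd_async_done() fires with -EINPROGRESS and completes
 * ctx->restart before issuing more work. -EINPROGRESS means the
 * request was queued and will complete asynchronously; 0 means it
 * completed synchronously; anything else is a hard error.
 */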
d469f841 MB |
628 | static void dm_crypt_bio_destructor(struct bio *bio) |
629 | { | |
028867ac | 630 | struct dm_crypt_io *io = bio->bi_private; |
6a24c718 MB |
631 | struct crypt_config *cc = io->target->private; |
632 | ||
633 | bio_free(bio, cc->bs); | |
d469f841 | 634 | } |
6a24c718 | 635 | |
1da177e4 LT |
636 | /* |
637 | * Generate a new unfragmented bio with the given size | |
638 | * This should never violate the device limitations | |
933f01d4 MB |
639 | * May return a smaller bio when running out of pages, indicated by |
640 | * *out_of_pages set to 1. | |
1da177e4 | 641 | */ |
933f01d4 MB |
642 | static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, |
643 | unsigned *out_of_pages) | |
1da177e4 | 644 | { |
027581f3 | 645 | struct crypt_config *cc = io->target->private; |
8b004457 | 646 | struct bio *clone; |
1da177e4 | 647 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
b4e3ca1a | 648 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; |
91e10625 MB |
649 | unsigned i, len; |
650 | struct page *page; | |
1da177e4 | 651 | |
2f9941b6 | 652 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); |
8b004457 | 653 | if (!clone) |
1da177e4 | 654 | return NULL; |
1da177e4 | 655 | |
027581f3 | 656 | clone_init(io, clone); |
933f01d4 | 657 | *out_of_pages = 0; |
6a24c718 | 658 | |
f97380bc | 659 | for (i = 0; i < nr_iovecs; i++) { |
91e10625 | 660 | page = mempool_alloc(cc->page_pool, gfp_mask); |
933f01d4 MB |
661 | if (!page) { |
662 | *out_of_pages = 1; | |
1da177e4 | 663 | break; |
933f01d4 | 664 | } |
1da177e4 LT |
665 | |
666 | /* | |
667 | * if additional pages cannot be allocated without waiting, | |
668 | * return a partially allocated bio, the caller will then try | |
669 | * to allocate additional bios while submitting this partial bio | |
670 | */ | |
f97380bc | 671 | if (i == (MIN_BIO_PAGES - 1)) |
1da177e4 LT |
672 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
673 | ||
91e10625 MB |
674 | len = (size > PAGE_SIZE) ? PAGE_SIZE : size; |
675 | ||
676 | if (!bio_add_page(clone, page, len, 0)) { | |
677 | mempool_free(page, cc->page_pool); | |
678 | break; | |
679 | } | |
1da177e4 | 680 | |
91e10625 | 681 | size -= len; |
1da177e4 LT |
682 | } |
683 | ||
8b004457 MB |
684 | if (!clone->bi_size) { |
685 | bio_put(clone); | |
1da177e4 LT |
686 | return NULL; |
687 | } | |
688 | ||
8b004457 | 689 | return clone; |
1da177e4 LT |
690 | } |
691 | ||
644bd2f0 | 692 | static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) |
1da177e4 | 693 | { |
644bd2f0 | 694 | unsigned int i; |
1da177e4 LT |
695 | struct bio_vec *bv; |
696 | ||
644bd2f0 | 697 | for (i = 0; i < clone->bi_vcnt; i++) { |
8b004457 | 698 | bv = bio_iovec_idx(clone, i); |
1da177e4 LT |
699 | BUG_ON(!bv->bv_page); |
700 | mempool_free(bv->bv_page, cc->page_pool); | |
701 | bv->bv_page = NULL; | |
702 | } | |
703 | } | |
704 | ||
dc440d1e MB |
705 | static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, |
706 | struct bio *bio, sector_t sector) | |
707 | { | |
708 | struct crypt_config *cc = ti->private; | |
709 | struct dm_crypt_io *io; | |
710 | ||
711 | io = mempool_alloc(cc->io_pool, GFP_NOIO); | |
712 | io->target = ti; | |
713 | io->base_bio = bio; | |
714 | io->sector = sector; | |
715 | io->error = 0; | |
393b47ef | 716 | io->base_io = NULL; |
dc440d1e MB |
717 | atomic_set(&io->pending, 0); |
718 | ||
719 | return io; | |
720 | } | |
721 | ||
3e1a8bdd MB |
722 | static void crypt_inc_pending(struct dm_crypt_io *io) |
723 | { | |
724 | atomic_inc(&io->pending); | |
725 | } | |
726 | ||
1da177e4 LT |
727 | /* |
728 | * One of the bios was finished. Check for completion of | |
729 | * the whole request and correctly clean up the buffer. | |
393b47ef | 730 | * If base_io is set, wait for the last fragment to complete. |
1da177e4 | 731 | */ |
5742fd77 | 732 | static void crypt_dec_pending(struct dm_crypt_io *io) |
1da177e4 | 733 | { |
5742fd77 | 734 | struct crypt_config *cc = io->target->private; |
b35f8caa MB |
735 | struct bio *base_bio = io->base_bio; |
736 | struct dm_crypt_io *base_io = io->base_io; | |
737 | int error = io->error; | |
1da177e4 LT |
738 | |
739 | if (!atomic_dec_and_test(&io->pending)) | |
740 | return; | |
741 | ||
b35f8caa MB |
742 | mempool_free(io, cc->io_pool); |
743 | ||
744 | if (likely(!base_io)) | |
745 | bio_endio(base_bio, error); | |
393b47ef | 746 | else { |
b35f8caa MB |
747 | if (error && !base_io->error) |
748 | base_io->error = error; | |
749 | crypt_dec_pending(base_io); | |
393b47ef | 750 | } |
1da177e4 LT |
751 | } |
752 | ||
753 | /* | |
cabf08e4 | 754 | * kcryptd/kcryptd_io: |
1da177e4 LT |
755 | * |
756 | * Needed because it would be very unwise to do decryption in an | |
23541d2d | 757 | * interrupt context. |
cabf08e4 MB |
758 | * |
759 | * kcryptd performs the actual encryption or decryption. | |
760 | * | |
761 | * kcryptd_io performs the IO submission. | |
762 | * | |
763 | * They must be separated as otherwise the final stages could be | |
764 | * starved by new requests which can block in the first stages due | |
765 | * to memory allocation. | |
c0297721 AK |
766 | * |
767 | * The work is done per CPU, globally for all dm-crypt instances.
768 | * The work items should not depend on each other and do not block.
1da177e4 | 769 | */ |
6712ecf8 | 770 | static void crypt_endio(struct bio *clone, int error) |
8b004457 | 771 | { |
028867ac | 772 | struct dm_crypt_io *io = clone->bi_private; |
8b004457 | 773 | struct crypt_config *cc = io->target->private; |
ee7a491e | 774 | unsigned rw = bio_data_dir(clone); |
8b004457 | 775 | |
adfe4770 MB |
776 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) |
777 | error = -EIO; | |
778 | ||
8b004457 | 779 | /* |
6712ecf8 | 780 | * free the processed pages |
8b004457 | 781 | */ |
ee7a491e | 782 | if (rw == WRITE) |
644bd2f0 | 783 | crypt_free_buffer_pages(cc, clone); |
8b004457 MB |
784 | |
785 | bio_put(clone); | |
8b004457 | 786 | |
ee7a491e MB |
787 | if (rw == READ && !error) { |
788 | kcryptd_queue_crypt(io); | |
789 | return; | |
790 | } | |
5742fd77 MB |
791 | |
792 | if (unlikely(error)) | |
793 | io->error = error; | |
794 | ||
795 | crypt_dec_pending(io); | |
8b004457 MB |
796 | } |
797 | ||
028867ac | 798 | static void clone_init(struct dm_crypt_io *io, struct bio *clone) |
8b004457 MB |
799 | { |
800 | struct crypt_config *cc = io->target->private; | |
801 | ||
802 | clone->bi_private = io; | |
803 | clone->bi_end_io = crypt_endio; | |
804 | clone->bi_bdev = cc->dev->bdev; | |
805 | clone->bi_rw = io->base_bio->bi_rw; | |
027581f3 | 806 | clone->bi_destructor = dm_crypt_bio_destructor; |
8b004457 MB |
807 | } |
808 | ||
20c82538 MB |
809 | static void kcryptd_unplug(struct crypt_config *cc) |
810 | { | |
811 | blk_unplug(bdev_get_queue(cc->dev->bdev)); | |
812 | } | |
813 | ||
814 | static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) | |
8b004457 MB |
815 | { |
816 | struct crypt_config *cc = io->target->private; | |
817 | struct bio *base_bio = io->base_bio; | |
818 | struct bio *clone; | |
93e605c2 | 819 | |
8b004457 MB |
820 | /* |
821 | * The block layer might modify the bvec array, so always | |
822 | * copy the required bvecs because we need the original | |
823 | * one in order to decrypt the whole bio data *afterwards*. | |
824 | */ | |
20c82538 MB |
825 | clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); |
826 | if (!clone) { | |
827 | kcryptd_unplug(cc); | |
828 | return 1; | |
93e605c2 | 829 | } |
8b004457 | 830 | |
20c82538 MB |
831 | crypt_inc_pending(io); |
832 | ||
8b004457 MB |
833 | clone_init(io, clone); |
834 | clone->bi_idx = 0; | |
835 | clone->bi_vcnt = bio_segments(base_bio); | |
836 | clone->bi_size = base_bio->bi_size; | |
0c395b0f | 837 | clone->bi_sector = cc->start + io->sector; |
8b004457 MB |
838 | memcpy(clone->bi_io_vec, bio_iovec(base_bio), |
839 | sizeof(struct bio_vec) * clone->bi_vcnt); | |
8b004457 | 840 | |
93e605c2 | 841 | generic_make_request(clone); |
20c82538 | 842 | return 0; |
8b004457 MB |
843 | } |
844 | ||
4e4eef64 MB |
845 | static void kcryptd_io_write(struct dm_crypt_io *io) |
846 | { | |
95497a96 | 847 | struct bio *clone = io->ctx.bio_out; |
95497a96 | 848 | generic_make_request(clone); |
4e4eef64 MB |
849 | } |
850 | ||
395b167c AK |
851 | static void kcryptd_io(struct work_struct *work) |
852 | { | |
853 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | |
854 | ||
20c82538 MB |
855 | if (bio_data_dir(io->base_bio) == READ) { |
856 | crypt_inc_pending(io); | |
857 | if (kcryptd_io_read(io, GFP_NOIO)) | |
858 | io->error = -ENOMEM; | |
859 | crypt_dec_pending(io); | |
860 | } else | |
395b167c AK |
861 | kcryptd_io_write(io); |
862 | } | |
863 | ||
864 | static void kcryptd_queue_io(struct dm_crypt_io *io) | |
865 | { | |
866 | struct crypt_config *cc = io->target->private; | |
867 | ||
868 | INIT_WORK(&io->work, kcryptd_io); | |
869 | queue_work(cc->io_queue, &io->work); | |
870 | } | |
871 | ||
95497a96 MB |
872 | static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, |
873 | int error, int async) | |
4e4eef64 | 874 | { |
dec1cedf MB |
875 | struct bio *clone = io->ctx.bio_out; |
876 | struct crypt_config *cc = io->target->private; | |
877 | ||
878 | if (unlikely(error < 0)) { | |
879 | crypt_free_buffer_pages(cc, clone); | |
880 | bio_put(clone); | |
881 | io->error = -EIO; | |
6c031f41 | 882 | crypt_dec_pending(io); |
dec1cedf MB |
883 | return; |
884 | } | |
885 | ||
886 | /* crypt_convert should have filled the clone bio */ | |
887 | BUG_ON(io->ctx.idx_out < clone->bi_vcnt); | |
888 | ||
889 | clone->bi_sector = cc->start + io->sector; | |
899c95d3 | 890 | |
95497a96 MB |
891 | if (async) |
892 | kcryptd_queue_io(io); | |
1e37bb8e | 893 | else |
95497a96 | 894 | generic_make_request(clone); |
4e4eef64 MB |
895 | } |
896 | ||
fc5a5e9a | 897 | static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
8b004457 MB |
898 | { |
899 | struct crypt_config *cc = io->target->private; | |
8b004457 | 900 | struct bio *clone; |
393b47ef | 901 | struct dm_crypt_io *new_io; |
c8081618 | 902 | int crypt_finished; |
933f01d4 | 903 | unsigned out_of_pages = 0; |
dec1cedf | 904 | unsigned remaining = io->base_bio->bi_size; |
b635b00e | 905 | sector_t sector = io->sector; |
dec1cedf | 906 | int r; |
8b004457 | 907 | |
fc5a5e9a MB |
908 | /* |
909 | * Prevent io from disappearing until this function completes. | |
910 | */ | |
911 | crypt_inc_pending(io); | |
b635b00e | 912 | crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); |
fc5a5e9a | 913 | |
93e605c2 MB |
914 | /* |
915 | * The allocated buffers can be smaller than the whole bio, | |
916 | * so repeat the whole process until all the data can be handled. | |
917 | */ | |
918 | while (remaining) { | |
933f01d4 | 919 | clone = crypt_alloc_buffer(io, remaining, &out_of_pages); |
23541d2d | 920 | if (unlikely(!clone)) { |
5742fd77 | 921 | io->error = -ENOMEM; |
fc5a5e9a | 922 | break; |
23541d2d | 923 | } |
93e605c2 | 924 | |
53017030 MB |
925 | io->ctx.bio_out = clone; |
926 | io->ctx.idx_out = 0; | |
93e605c2 | 927 | |
dec1cedf | 928 | remaining -= clone->bi_size; |
b635b00e | 929 | sector += bio_sectors(clone); |
93e605c2 | 930 | |
4e594098 | 931 | crypt_inc_pending(io); |
dec1cedf | 932 | r = crypt_convert(cc, &io->ctx); |
c8081618 | 933 | crypt_finished = atomic_dec_and_test(&io->ctx.pending); |
f97380bc | 934 | |
c8081618 MB |
935 | /* Encryption was already finished, submit io now */ |
936 | if (crypt_finished) { | |
3a7f6c99 | 937 | kcryptd_crypt_write_io_submit(io, r, 0); |
c8081618 MB |
938 | |
939 | /* | |
940 | * If there was an error, do not try next fragments. | |
941 | * For async, error is processed in async handler. | |
942 | */ | |
6c031f41 | 943 | if (unlikely(r < 0)) |
fc5a5e9a | 944 | break; |
b635b00e MB |
945 | |
946 | io->sector = sector; | |
4e594098 | 947 | } |
93e605c2 | 948 | |
933f01d4 MB |
949 | /* |
950 | * Out of memory -> run queues | |
951 | * But don't wait if split was due to the io size restriction | |
952 | */ | |
953 | if (unlikely(out_of_pages)) | |
8aa7e847 | 954 | congestion_wait(BLK_RW_ASYNC, HZ/100); |
933f01d4 | 955 | |
393b47ef MB |
956 | /* |
957 | * With async crypto it is unsafe to share the crypto context | |
958 | * between fragments, so switch to a new dm_crypt_io structure. | |
959 | */ | |
960 | if (unlikely(!crypt_finished && remaining)) { | |
961 | new_io = crypt_io_alloc(io->target, io->base_bio, | |
962 | sector); | |
963 | crypt_inc_pending(new_io); | |
964 | crypt_convert_init(cc, &new_io->ctx, NULL, | |
965 | io->base_bio, sector); | |
966 | new_io->ctx.idx_in = io->ctx.idx_in; | |
967 | new_io->ctx.offset_in = io->ctx.offset_in; | |
968 | ||
969 | /* | |
970 | * Fragments after the first use the base_io | |
971 | * pending count. | |
972 | */ | |
973 | if (!io->base_io) | |
974 | new_io->base_io = io; | |
975 | else { | |
976 | new_io->base_io = io->base_io; | |
977 | crypt_inc_pending(io->base_io); | |
978 | crypt_dec_pending(io); | |
979 | } | |
980 | ||
981 | io = new_io; | |
982 | } | |
93e605c2 | 983 | } |
899c95d3 MB |
984 | |
985 | crypt_dec_pending(io); | |
84131db6 MB |
986 | } |
987 | ||
4e4eef64 | 988 | static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) |
5742fd77 MB |
989 | { |
990 | if (unlikely(error < 0)) | |
991 | io->error = -EIO; | |
992 | ||
993 | crypt_dec_pending(io); | |
994 | } | |
995 | ||
4e4eef64 | 996 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) |
8b004457 MB |
997 | { |
998 | struct crypt_config *cc = io->target->private; | |
5742fd77 | 999 | int r = 0; |
1da177e4 | 1000 | |
3e1a8bdd | 1001 | crypt_inc_pending(io); |
3a7f6c99 | 1002 | |
53017030 | 1003 | crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, |
0c395b0f | 1004 | io->sector); |
1da177e4 | 1005 | |
5742fd77 MB |
1006 | r = crypt_convert(cc, &io->ctx); |
1007 | ||
3f1e9070 | 1008 | if (atomic_dec_and_test(&io->ctx.pending)) |
3a7f6c99 MB |
1009 | kcryptd_crypt_read_done(io, r); |
1010 | ||
1011 | crypt_dec_pending(io); | |
1da177e4 LT |
1012 | } |
1013 | ||
95497a96 MB |
1014 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
1015 | int error) | |
1016 | { | |
b2174eeb HY |
1017 | struct dm_crypt_request *dmreq = async_req->data; |
1018 | struct convert_context *ctx = dmreq->ctx; | |
95497a96 MB |
1019 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); |
1020 | struct crypt_config *cc = io->target->private; | |
1021 | ||
1022 | if (error == -EINPROGRESS) { | |
1023 | complete(&ctx->restart); | |
1024 | return; | |
1025 | } | |
1026 | ||
2dc5327d MB |
1027 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) |
1028 | error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); | |
1029 | ||
b2174eeb | 1030 | mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); |
95497a96 MB |
1031 | |
1032 | if (!atomic_dec_and_test(&ctx->pending)) | |
1033 | return; | |
1034 | ||
1035 | if (bio_data_dir(io->base_bio) == READ) | |
1036 | kcryptd_crypt_read_done(io, error); | |
1037 | else | |
1038 | kcryptd_crypt_write_io_submit(io, error, 1); | |
1039 | } | |
1040 | ||
395b167c | 1041 | static void kcryptd_crypt(struct work_struct *work) |
1da177e4 | 1042 | { |
028867ac | 1043 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
8b004457 | 1044 | |
cabf08e4 | 1045 | if (bio_data_dir(io->base_bio) == READ) |
395b167c | 1046 | kcryptd_crypt_read_convert(io); |
4e4eef64 | 1047 | else |
395b167c | 1048 | kcryptd_crypt_write_convert(io); |
cabf08e4 MB |
1049 | } |
1050 | ||
395b167c | 1051 | static void kcryptd_queue_crypt(struct dm_crypt_io *io) |
cabf08e4 | 1052 | { |
395b167c | 1053 | struct crypt_config *cc = io->target->private; |
cabf08e4 | 1054 | |
395b167c AK |
1055 | INIT_WORK(&io->work, kcryptd_crypt); |
1056 | queue_work(cc->crypt_queue, &io->work); | |
1da177e4 LT |
1057 | } |
1058 | ||
1059 | /* | |
1060 | * Decode key from its hex representation | |
1061 | */ | |
1062 | static int crypt_decode_key(u8 *key, char *hex, unsigned int size) | |
1063 | { | |
1064 | char buffer[3]; | |
1065 | char *endp; | |
1066 | unsigned int i; | |
1067 | ||
1068 | buffer[2] = '\0'; | |
1069 | ||
8b004457 | 1070 | for (i = 0; i < size; i++) { |
1da177e4 LT |
1071 | buffer[0] = *hex++; |
1072 | buffer[1] = *hex++; | |
1073 | ||
1074 | key[i] = (u8)simple_strtoul(buffer, &endp, 16); | |
1075 | ||
1076 | if (endp != &buffer[2]) | |
1077 | return -EINVAL; | |
1078 | } | |
1079 | ||
1080 | if (*hex != '\0') | |
1081 | return -EINVAL; | |
1082 | ||
1083 | return 0; | |
1084 | } | |
1085 | ||
1086 | /* | |
1087 | * Encode key into its hex representation | |
1088 | */ | |
1089 | static void crypt_encode_key(char *hex, u8 *key, unsigned int size) | |
1090 | { | |
1091 | unsigned int i; | |
1092 | ||
8b004457 | 1093 | for (i = 0; i < size; i++) { |
1da177e4 LT |
1094 | sprintf(hex, "%02x", *key); |
1095 | hex += 2; | |
1096 | key++; | |
1097 | } | |
1098 | } | |
1099 | ||
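A user-space round trip of the two helpers above, with strtoul standing in for the kernel's simple_strtoul and the endp validation omitted:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *hex = "00a1ff42";
	unsigned char key[4];
	char out[9], buf[3] = "";

	for (unsigned i = 0; i < sizeof(key); i++) {	/* crypt_decode_key() */
		buf[0] = hex[2 * i];
		buf[1] = hex[2 * i + 1];
		key[i] = (unsigned char)strtoul(buf, NULL, 16);
	}
	for (unsigned i = 0; i < sizeof(key); i++)	/* crypt_encode_key() */
		sprintf(out + 2 * i, "%02x", key[i]);

	assert(!strcmp(out, hex));
	return 0;
}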
c0297721 AK |
1100 | static int crypt_setkey_allcpus(struct crypt_config *cc) |
1101 | { | |
1102 | int cpu, err = 0, r; | |
1103 | ||
1104 | for_each_possible_cpu(cpu) { | |
1105 | r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfm, | |
1106 | cc->key, cc->key_size); | |
1107 | if (r) | |
1108 | err = r; | |
1109 | } | |
1110 | ||
1111 | return err; | |
1112 | } | |
1113 | ||
e48d4bbf MB |
1114 | static int crypt_set_key(struct crypt_config *cc, char *key) |
1115 | { | |
69a8cfcd MB |
1116 | /* The key size may not be changed. */ |
1117 | if (cc->key_size != (strlen(key) >> 1)) | |
e48d4bbf MB |
1118 | return -EINVAL; |
1119 | ||
69a8cfcd MB |
1120 | /* Hyphen (which gives a key_size of zero) means there is no key. */ |
1121 | if (!cc->key_size && strcmp(key, "-")) | |
1122 | return -EINVAL; | |
e48d4bbf | 1123 | |
69a8cfcd | 1124 | if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) |
e48d4bbf MB |
1125 | return -EINVAL; |
1126 | ||
1127 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
1128 | ||
c0297721 | 1129 | return crypt_setkey_allcpus(cc); |
e48d4bbf MB |
1130 | } |
1131 | ||
1132 | static int crypt_wipe_key(struct crypt_config *cc) | |
1133 | { | |
1134 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
1135 | memset(&cc->key, 0, cc->key_size * sizeof(u8)); | |
c0297721 AK |
1136 | |
1137 | return crypt_setkey_allcpus(cc); | |
e48d4bbf MB |
1138 | } |
1139 | ||
28513fcc MB |
1140 | static void crypt_dtr(struct dm_target *ti) |
1141 | { | |
1142 | struct crypt_config *cc = ti->private; | |
c0297721 AK |
1143 | struct crypt_cpu *cpu_cc; |
1144 | int cpu; | |
28513fcc MB |
1145 | |
1146 | ti->private = NULL; | |
1147 | ||
1148 | if (!cc) | |
1149 | return; | |
1150 | ||
1151 | if (cc->io_queue) | |
1152 | destroy_workqueue(cc->io_queue); | |
1153 | if (cc->crypt_queue) | |
1154 | destroy_workqueue(cc->crypt_queue); | |
1155 | ||
c0297721 AK |
1156 | if (cc->cpu) |
1157 | for_each_possible_cpu(cpu) { | |
1158 | cpu_cc = per_cpu_ptr(cc->cpu, cpu); | |
1159 | if (cpu_cc->req) | |
1160 | mempool_free(cpu_cc->req, cc->req_pool); | |
1161 | if (cpu_cc->tfm) | |
1162 | crypto_free_ablkcipher(cpu_cc->tfm); | |
1163 | } | |
1164 | ||
28513fcc MB |
1165 | if (cc->bs) |
1166 | bioset_free(cc->bs); | |
1167 | ||
1168 | if (cc->page_pool) | |
1169 | mempool_destroy(cc->page_pool); | |
1170 | if (cc->req_pool) | |
1171 | mempool_destroy(cc->req_pool); | |
1172 | if (cc->io_pool) | |
1173 | mempool_destroy(cc->io_pool); | |
1174 | ||
1175 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) | |
1176 | cc->iv_gen_ops->dtr(cc); | |
1177 | ||
28513fcc MB |
1178 | if (cc->dev) |
1179 | dm_put_device(ti, cc->dev); | |
1180 | ||
c0297721 AK |
1181 | if (cc->cpu) |
1182 | free_percpu(cc->cpu); | |
1183 | ||
5ebaee6d | 1184 | kzfree(cc->cipher); |
7dbcd137 | 1185 | kzfree(cc->cipher_string); |
28513fcc MB |
1186 | |
1187 | /* Must zero key material before freeing */ | |
1188 | kzfree(cc); | |
1189 | } | |
1190 | ||
5ebaee6d MB |
1191 | static int crypt_ctr_cipher(struct dm_target *ti, |
1192 | char *cipher_in, char *key) | |
1da177e4 | 1193 | { |
5ebaee6d | 1194 | struct crypt_config *cc = ti->private; |
c0297721 | 1195 | struct crypto_ablkcipher *tfm; |
5ebaee6d MB |
1196 | char *tmp, *cipher, *chainmode, *ivmode, *ivopts; |
1197 | char *cipher_api = NULL; | |
c0297721 | 1198 | int cpu, ret = -EINVAL; |
1da177e4 | 1199 | |
5ebaee6d MB |
1200 | /* Convert to crypto api definition? */ |
1201 | if (strchr(cipher_in, '(')) { | |
1202 | ti->error = "Bad cipher specification"; | |
1da177e4 LT |
1203 | return -EINVAL; |
1204 | } | |
1205 | ||
7dbcd137 MB |
1206 | cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); |
1207 | if (!cc->cipher_string) | |
1208 | goto bad_mem; | |
1209 | ||
5ebaee6d MB |
1210 | /* |
1211 | * Legacy dm-crypt cipher specification | |
1212 | * cipher-mode-iv:ivopts | |
1213 | */ | |
1214 | tmp = cipher_in; | |
1da177e4 | 1215 | cipher = strsep(&tmp, "-"); |
5ebaee6d MB |
1216 | |
1217 | cc->cipher = kstrdup(cipher, GFP_KERNEL); | |
1218 | if (!cc->cipher) | |
1219 | goto bad_mem; | |
1220 | ||
1da177e4 LT |
1221 | chainmode = strsep(&tmp, "-"); |
1222 | ivopts = strsep(&tmp, "-"); | |
1223 | ivmode = strsep(&ivopts, ":"); | |
1224 | ||
1225 | if (tmp) | |
5ebaee6d | 1226 | DMWARN("Ignoring unexpected additional cipher options"); |
1da177e4 | 1227 | |
c0297721 AK |
1228 | cc->cpu = alloc_percpu(struct crypt_cpu); |
1229 | if (!cc->cpu) { | |
1230 | ti->error = "Cannot allocate per cpu state"; | |
1231 | goto bad_mem; | |
1232 | } | |
1233 | ||
7dbcd137 MB |
1234 | /* |
1235 | * For compatibility with the original dm-crypt mapping format, if | |
1236 | * only the cipher name is supplied, use cbc-plain. | |
1237 | */ | |
5ebaee6d | 1238 | if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) { |
1da177e4 LT |
1239 | chainmode = "cbc"; |
1240 | ivmode = "plain"; | |
1241 | } | |
1242 | ||
d1806f6a | 1243 | if (strcmp(chainmode, "ecb") && !ivmode) { |
5ebaee6d MB |
1244 | ti->error = "IV mechanism required"; |
1245 | return -EINVAL; | |
1da177e4 LT |
1246 | } |
1247 | ||
5ebaee6d MB |
1248 | cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); |
1249 | if (!cipher_api) | |
1250 | goto bad_mem; | |
1251 | ||
1252 | ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, | |
1253 | "%s(%s)", chainmode, cipher); | |
1254 | if (ret < 0) { | |
1255 | kfree(cipher_api); | |
1256 | goto bad_mem; | |
1da177e4 LT |
1257 | } |
1258 | ||
5ebaee6d | 1259 | /* Allocate cipher */ |
c0297721 AK |
1260 | for_each_possible_cpu(cpu) { |
1261 | tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0); | |
1262 | if (IS_ERR(tfm)) { | |
1263 | ret = PTR_ERR(tfm); | |
1264 | ti->error = "Error allocating crypto tfm"; | |
1265 | goto bad; | |
1266 | } | |
1267 | per_cpu_ptr(cc->cpu, cpu)->tfm = tfm; | |
1da177e4 | 1268 | } |
1da177e4 | 1269 | |
5ebaee6d MB |
1270 | /* Initialize and set key */ |
1271 | ret = crypt_set_key(cc, key); | |
28513fcc | 1272 | if (ret < 0) { |
0b430958 | 1273 | ti->error = "Error decoding and setting key"; |
28513fcc | 1274 | goto bad; |
0b430958 MB |
1275 | } |
1276 | ||
5ebaee6d | 1277 | /* Initialize IV */ |
c0297721 | 1278 | cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc)); |
5ebaee6d MB |
1279 | if (cc->iv_size) |
1280 | /* at least a 64 bit sector number should fit in our buffer */ | |
1281 | cc->iv_size = max(cc->iv_size, | |
1282 | (unsigned int)(sizeof(u64) / sizeof(u8))); | |
1283 | else if (ivmode) { | |
1284 | DMWARN("Selected cipher does not support IVs"); | |
1285 | ivmode = NULL; | |
1286 | } | |
1287 | ||
1288 | /* Choose ivmode, see comments at iv code. */ | |
1da177e4 LT |
1289 | if (ivmode == NULL) |
1290 | cc->iv_gen_ops = NULL; | |
1291 | else if (strcmp(ivmode, "plain") == 0) | |
1292 | cc->iv_gen_ops = &crypt_iv_plain_ops; | |
61afef61 MB |
1293 | else if (strcmp(ivmode, "plain64") == 0) |
1294 | cc->iv_gen_ops = &crypt_iv_plain64_ops; | |
1da177e4 LT |
1295 | else if (strcmp(ivmode, "essiv") == 0) |
1296 | cc->iv_gen_ops = &crypt_iv_essiv_ops; | |
48527fa7 RS |
1297 | else if (strcmp(ivmode, "benbi") == 0) |
1298 | cc->iv_gen_ops = &crypt_iv_benbi_ops; | |
46b47730 LN |
1299 | else if (strcmp(ivmode, "null") == 0) |
1300 | cc->iv_gen_ops = &crypt_iv_null_ops; | |
1da177e4 | 1301 | else { |
5ebaee6d | 1302 | ret = -EINVAL; |
72d94861 | 1303 | ti->error = "Invalid IV mode"; |
28513fcc | 1304 | goto bad; |
1da177e4 LT |
1305 | } |
1306 | ||
28513fcc MB |
1307 | /* Allocate IV */ |
1308 | if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { | |
1309 | ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); | |
1310 | if (ret < 0) { | |
1311 | ti->error = "Error creating IV"; | |
1312 | goto bad; | |
1313 | } | |
1314 | } | |
1da177e4 | 1315 | |
28513fcc MB |
1316 | /* Initialize IV (set keys for ESSIV etc) */ |
1317 | if (cc->iv_gen_ops && cc->iv_gen_ops->init) { | |
1318 | ret = cc->iv_gen_ops->init(cc); | |
1319 | if (ret < 0) { | |
1320 | ti->error = "Error initialising IV"; | |
1321 | goto bad; | |
1322 | } | |
b95bf2d3 MB |
1323 | } |
1324 | ||
5ebaee6d MB |
1325 | ret = 0; |
1326 | bad: | |
1327 | kfree(cipher_api); | |
1328 | return ret; | |
1329 | ||
1330 | bad_mem: | |
1331 | ti->error = "Cannot allocate cipher strings"; | |
1332 | return -ENOMEM; | |
1333 | } | |
1334 | ||
1335 | /* | |
1336 | * Construct an encryption mapping: | |
1337 | * <cipher> <key> <iv_offset> <dev_path> <start> | |
1338 | */ | |
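A concrete, hypothetical instance of this format (device name and length invented):

/*
 *   dmsetup create cryptvol --table \
 *     "0 2097152 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb1 0"
 *
 * maps sectors 0..2097151 (1 GiB at 512-byte sectors) through AES-256
 * in CBC mode with ESSIV(sha256) IVs, iv_offset 0, starting at sector 0
 * of /dev/sdb1.
 */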
1339 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
1340 | { | |
1341 | struct crypt_config *cc; | |
1342 | unsigned int key_size; | |
1343 | unsigned long long tmpll; | |
1344 | int ret; | |
1345 | ||
1346 | if (argc != 5) { | |
1347 | ti->error = "Not enough arguments"; | |
1348 | return -EINVAL; | |
1da177e4 LT |
1349 | } |
1350 | ||
5ebaee6d MB |
1351 | key_size = strlen(argv[1]) >> 1; |
1352 | ||
1353 | cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); | |
1354 | if (!cc) { | |
1355 | ti->error = "Cannot allocate encryption context"; | |
1356 | return -ENOMEM; | |
1357 | } | |
69a8cfcd | 1358 | cc->key_size = key_size; |
5ebaee6d MB |
1359 | |
1360 | ti->private = cc; | |
1361 | ret = crypt_ctr_cipher(ti, argv[0], argv[1]); | |
1362 | if (ret < 0) | |
1363 | goto bad; | |
1364 | ||
28513fcc | 1365 | ret = -ENOMEM; |
93d2341c | 1366 | cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); |
1da177e4 | 1367 | if (!cc->io_pool) { |
72d94861 | 1368 | ti->error = "Cannot allocate crypt io mempool"; |
28513fcc | 1369 | goto bad; |
1da177e4 LT |
1370 | } |
1371 | ||
ddd42edf | 1372 | cc->dmreq_start = sizeof(struct ablkcipher_request); |
c0297721 | 1373 | cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); |
ddd42edf | 1374 | cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); |
c0297721 | 1375 | cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) & |
3a7f6c99 | 1376 | ~(crypto_tfm_ctx_alignment() - 1); |
ddd42edf MB |
1377 | |
1378 | cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + | |
1379 | sizeof(struct dm_crypt_request) + cc->iv_size); | |
1380 | if (!cc->req_pool) { | |
1381 | ti->error = "Cannot allocate crypt request mempool"; | |
28513fcc | 1382 | goto bad; |
ddd42edf | 1383 | } |
ddd42edf | 1384 | |
a19b27ce | 1385 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); |
1da177e4 | 1386 | if (!cc->page_pool) { |
72d94861 | 1387 | ti->error = "Cannot allocate page mempool"; |
28513fcc | 1388 | goto bad; |
1da177e4 LT |
1389 | } |
1390 | ||
bb799ca0 | 1391 | cc->bs = bioset_create(MIN_IOS, 0); |
6a24c718 MB |
1392 | if (!cc->bs) { |
1393 | ti->error = "Cannot allocate crypt bioset"; | |
28513fcc | 1394 | goto bad; |
6a24c718 MB |
1395 | } |
1396 | ||
28513fcc | 1397 | ret = -EINVAL; |
4ee218cd | 1398 | if (sscanf(argv[2], "%llu", &tmpll) != 1) { |
72d94861 | 1399 | ti->error = "Invalid iv_offset sector"; |
28513fcc | 1400 | goto bad; |
1da177e4 | 1401 | } |
4ee218cd | 1402 | cc->iv_offset = tmpll; |
1da177e4 | 1403 | |
28513fcc MB |
1404 | if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) { |
1405 | ti->error = "Device lookup failed"; | |
1406 | goto bad; | |
1407 | } | |
1408 | ||
4ee218cd | 1409 | if (sscanf(argv[4], "%llu", &tmpll) != 1) { |
72d94861 | 1410 | ti->error = "Invalid device sector"; |
28513fcc | 1411 | goto bad; |
1da177e4 | 1412 | } |
4ee218cd | 1413 | cc->start = tmpll; |
1da177e4 | 1414 | |
28513fcc | 1415 | ret = -ENOMEM; |
c0297721 AK |
1416 | cc->io_queue = alloc_workqueue("kcryptd_io", |
1417 | WQ_NON_REENTRANT| | |
1418 | WQ_MEM_RECLAIM, | |
1419 | 1); | |
cabf08e4 MB |
1420 | if (!cc->io_queue) { |
1421 | ti->error = "Couldn't create kcryptd io queue"; | |
28513fcc | 1422 | goto bad; |
cabf08e4 MB |
1423 | } |
1424 | ||
c0297721 AK |
1425 | cc->crypt_queue = alloc_workqueue("kcryptd", |
1426 | WQ_NON_REENTRANT| | |
1427 | WQ_CPU_INTENSIVE| | |
1428 | WQ_MEM_RECLAIM, | |
1429 | 1); | |
cabf08e4 | 1430 | if (!cc->crypt_queue) { |
9934a8be | 1431 | ti->error = "Couldn't create kcryptd queue"; |
28513fcc | 1432 | goto bad; |
9934a8be MB |
1433 | } |
1434 | ||
647c7db1 | 1435 | ti->num_flush_requests = 1; |
1da177e4 LT |
1436 | return 0; |
1437 | ||
28513fcc MB |
1438 | bad: |
1439 | crypt_dtr(ti); | |
1440 | return ret; | |
1da177e4 LT |
1441 | } |
1442 | ||
1da177e4 LT |
1443 | static int crypt_map(struct dm_target *ti, struct bio *bio, |
1444 | union map_info *map_context) | |
1445 | { | |
028867ac | 1446 | struct dm_crypt_io *io; |
647c7db1 MP |
1447 | struct crypt_config *cc; |
1448 | ||
d87f4c14 | 1449 | if (bio->bi_rw & REQ_FLUSH) { |
647c7db1 MP |
1450 | cc = ti->private; |
1451 | bio->bi_bdev = cc->dev->bdev; | |
1452 | return DM_MAPIO_REMAPPED; | |
1453 | } | |
1da177e4 | 1454 | |
b441a262 | 1455 | io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector)); |
cabf08e4 | 1456 | |
20c82538 MB |
1457 | if (bio_data_dir(io->base_bio) == READ) { |
1458 | if (kcryptd_io_read(io, GFP_NOWAIT)) | |
1459 | kcryptd_queue_io(io); | |
1460 | } else | |
cabf08e4 | 1461 | kcryptd_queue_crypt(io); |
1da177e4 | 1462 | |
d2a7ad29 | 1463 | return DM_MAPIO_SUBMITTED; |
1da177e4 LT |
1464 | } |
1465 | ||
1466 | static int crypt_status(struct dm_target *ti, status_type_t type, | |
1467 | char *result, unsigned int maxlen) | |
1468 | { | |
5ebaee6d | 1469 | struct crypt_config *cc = ti->private; |
1da177e4 LT |
1470 | unsigned int sz = 0; |
1471 | ||
1472 | switch (type) { | |
1473 | case STATUSTYPE_INFO: | |
1474 | result[0] = '\0'; | |
1475 | break; | |
1476 | ||
1477 | case STATUSTYPE_TABLE: | |
7dbcd137 | 1478 | DMEMIT("%s ", cc->cipher_string); |
1da177e4 LT |
1479 | |
1480 | if (cc->key_size > 0) { | |
1481 | if ((maxlen - sz) < ((cc->key_size << 1) + 1)) | |
1482 | return -ENOMEM; | |
1483 | ||
1484 | crypt_encode_key(result + sz, cc->key, cc->key_size); | |
1485 | sz += cc->key_size << 1; | |
1486 | } else { | |
1487 | if (sz >= maxlen) | |
1488 | return -ENOMEM; | |
1489 | result[sz++] = '-'; | |
1490 | } | |
1491 | ||
4ee218cd AM |
1492 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, |
1493 | cc->dev->name, (unsigned long long)cc->start); | |
1da177e4 LT |
1494 | break; |
1495 | } | |
1496 | return 0; | |
1497 | } | |
1498 | ||
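For the hypothetical mapping shown before crypt_ctr(), the STATUSTYPE_TABLE branch reproduces the table line, e.g. via dmsetup table --showkeys (the backing device usually appears as major:minor):

/*
 *   0 2097152 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 8:17 0
 */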
e48d4bbf MB |
1499 | static void crypt_postsuspend(struct dm_target *ti) |
1500 | { | |
1501 | struct crypt_config *cc = ti->private; | |
1502 | ||
1503 | set_bit(DM_CRYPT_SUSPENDED, &cc->flags); | |
1504 | } | |
1505 | ||
1506 | static int crypt_preresume(struct dm_target *ti) | |
1507 | { | |
1508 | struct crypt_config *cc = ti->private; | |
1509 | ||
1510 | if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { | |
1511 | DMERR("aborting resume - crypt key is not set."); | |
1512 | return -EAGAIN; | |
1513 | } | |
1514 | ||
1515 | return 0; | |
1516 | } | |
1517 | ||
1518 | static void crypt_resume(struct dm_target *ti) | |
1519 | { | |
1520 | struct crypt_config *cc = ti->private; | |
1521 | ||
1522 | clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); | |
1523 | } | |
1524 | ||
1525 | /* Message interface | |
1526 | * key set <key> | |
1527 | * key wipe | |
1528 | */ | |
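Hypothetical usage; key manipulation is only accepted while the target is suspended, as the DM_CRYPT_SUSPENDED check below enforces:

/*
 *   dmsetup suspend cryptvol
 *   dmsetup message cryptvol 0 key wipe
 *   dmsetup message cryptvol 0 key set <64 hex digits>
 *   dmsetup resume cryptvol
 */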
1529 | static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) | |
1530 | { | |
1531 | struct crypt_config *cc = ti->private; | |
542da317 | 1532 | int ret = -EINVAL; |
e48d4bbf MB |
1533 | |
1534 | if (argc < 2) | |
1535 | goto error; | |
1536 | ||
1537 | if (!strnicmp(argv[0], MESG_STR("key"))) { | |
1538 | if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { | |
1539 | DMWARN("not suspended during key manipulation."); | |
1540 | return -EINVAL; | |
1541 | } | |
542da317 MB |
1542 | if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { |
1543 | ret = crypt_set_key(cc, argv[2]); | |
1544 | if (ret) | |
1545 | return ret; | |
1546 | if (cc->iv_gen_ops && cc->iv_gen_ops->init) | |
1547 | ret = cc->iv_gen_ops->init(cc); | |
1548 | return ret; | |
1549 | } | |
1550 | if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { | |
1551 | if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { | |
1552 | ret = cc->iv_gen_ops->wipe(cc); | |
1553 | if (ret) | |
1554 | return ret; | |
1555 | } | |
e48d4bbf | 1556 | return crypt_wipe_key(cc); |
542da317 | 1557 | } |
e48d4bbf MB |
1558 | } |
1559 | ||
1560 | error: | |
1561 | DMWARN("unrecognised message received."); | |
1562 | return -EINVAL; | |
1563 | } | |
1564 | ||
d41e26b9 MB |
1565 | static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, |
1566 | struct bio_vec *biovec, int max_size) | |
1567 | { | |
1568 | struct crypt_config *cc = ti->private; | |
1569 | struct request_queue *q = bdev_get_queue(cc->dev->bdev); | |
1570 | ||
1571 | if (!q->merge_bvec_fn) | |
1572 | return max_size; | |
1573 | ||
1574 | bvm->bi_bdev = cc->dev->bdev; | |
b441a262 | 1575 | bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector); |
d41e26b9 MB |
1576 | |
1577 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | |
1578 | } | |
1579 | ||
af4874e0 MS |
1580 | static int crypt_iterate_devices(struct dm_target *ti, |
1581 | iterate_devices_callout_fn fn, void *data) | |
1582 | { | |
1583 | struct crypt_config *cc = ti->private; | |
1584 | ||
5dea271b | 1585 | return fn(ti, cc->dev, cc->start, ti->len, data); |
af4874e0 MS |
1586 | } |
1587 | ||
1da177e4 LT |
1588 | static struct target_type crypt_target = { |
1589 | .name = "crypt", | |
c0297721 | 1590 | .version = {1, 9, 0}, |
1da177e4 LT |
1591 | .module = THIS_MODULE, |
1592 | .ctr = crypt_ctr, | |
1593 | .dtr = crypt_dtr, | |
1594 | .map = crypt_map, | |
1595 | .status = crypt_status, | |
e48d4bbf MB |
1596 | .postsuspend = crypt_postsuspend, |
1597 | .preresume = crypt_preresume, | |
1598 | .resume = crypt_resume, | |
1599 | .message = crypt_message, | |
d41e26b9 | 1600 | .merge = crypt_merge, |
af4874e0 | 1601 | .iterate_devices = crypt_iterate_devices, |
1da177e4 LT |
1602 | }; |
1603 | ||
1604 | static int __init dm_crypt_init(void) | |
1605 | { | |
1606 | int r; | |
1607 | ||
028867ac | 1608 | _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0); |
1da177e4 LT |
1609 | if (!_crypt_io_pool) |
1610 | return -ENOMEM; | |
1611 | ||
1da177e4 LT |
1612 | r = dm_register_target(&crypt_target); |
1613 | if (r < 0) { | |
72d94861 | 1614 | DMERR("register failed %d", r); |
9934a8be | 1615 | kmem_cache_destroy(_crypt_io_pool); |
1da177e4 LT |
1616 | } |
1617 | ||
1da177e4 LT |
1618 | return r; |
1619 | } | |
1620 | ||
1621 | static void __exit dm_crypt_exit(void) | |
1622 | { | |
10d3bd09 | 1623 | dm_unregister_target(&crypt_target); |
1da177e4 LT |
1624 | kmem_cache_destroy(_crypt_io_pool); |
1625 | } | |
1626 | ||
1627 | module_init(dm_crypt_init); | |
1628 | module_exit(dm_crypt_exit); | |
1629 | ||
1630 | MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); | |
1631 | MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); | |
1632 | MODULE_LICENSE("GPL"); |