/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

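/*
 * Finish a slow-path step: the data was processed in the aligned bounce
 * buffer, so copy the result back out into the destination scatterlist.
 */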
static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

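/*
 * Finish a fast-path step: write the bounce page back on the copy path,
 * drop any kernel mappings taken for this step and advance both
 * scatterlist walkers by the number of bytes processed.
 */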
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

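/*
 * Called by the cipher implementation after each step of the walk.  A
 * non-negative err is the number of bytes left unprocessed in this step;
 * the walk is advanced and, if data remains, the next step is set up.
 * Once the walk is finished (or on error) the IV is copied back to the
 * caller's buffer and any temporary memory is released.
 */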
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

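/*
 * Slow path: less than a full block is contiguously available in the
 * scatterlists, so bounce one block through an aligned temporary buffer
 * and point src/dst at it.
 */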
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

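/*
 * Copy path: the source or destination is not aligned to the cipher's
 * alignment mask, so stage this chunk through the preallocated bounce
 * page and process it in place there.
 */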
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

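/*
 * Fast path: operate directly on the scatterlist pages.  For virtual
 * walks the source is mapped, and the destination is mapped separately
 * only when it lives at a different page or offset than the source.
 */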
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

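/*
 * Set up the next step of the walk: clamp the request to what is
 * contiguously available in both scatterlists and pick the slow, copy or
 * fast path accordingly.
 */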
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->walk_blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

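/*
 * The IV supplied by the caller is not sufficiently aligned for this
 * cipher; work on an aligned copy instead.  blkcipher_walk_done() copies
 * the final IV back to the caller's buffer.
 */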
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

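/*
 * Example (illustrative sketch only): a typical synchronous mode
 * implementation drives the walk API above roughly as follows.  The
 * function name and the per-chunk processing are hypothetical; a real
 * mode (e.g. crypto/cbc.c) runs the underlying cipher on each block
 * instead of copying, and passes back the number of bytes it could not
 * handle instead of 0.
 */
static int __maybe_unused blkcipher_walk_example(struct blkcipher_desc *desc,
						 struct scatterlist *dst,
						 struct scatterlist *src,
						 unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		/*
		 * Transform walk.nbytes bytes from walk.src.virt.addr to
		 * walk.dst.virt.addr here; this sketch merely copies them.
		 */
		if (walk.dst.virt.addr != walk.src.virt.addr)
			memcpy(walk.dst.virt.addr, walk.src.virt.addr,
			       walk.nbytes);
		/* 0: every byte of this step was consumed. */
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
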
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

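/*
 * If the caller's key buffer does not satisfy the algorithm's alignment
 * mask, copy the key into a properly aligned temporary buffer before
 * handing it to the algorithm's setkey, then wipe and free the copy.
 */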
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

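/*
 * async_encrypt()/async_decrypt() expose a synchronous blkcipher through
 * the ablkcipher interface by packing the request parameters into an
 * on-stack blkcipher_desc and calling the algorithm's synchronous
 * handlers.
 */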
static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

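/*
 * For synchronous users the context is extended with aligned space for
 * the IV; crypto_init_blkcipher_ops_sync() points crt->iv at it.
 */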
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;
	crt->has_setkey = alg->max_keysize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

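/*
 * Instantiate the transform: callers that asked for the exact blkcipher
 * type get the synchronous ops, everyone else gets the ablkcipher
 * wrappers on top of the same algorithm.
 */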
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

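/*
 * Look up the inner cipher for an IV generator template and initialise
 * the spawn that ties the new instance to it.
 */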
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

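/*
 * Build a givcipher (IV generator) instance on top of an existing
 * blkcipher or ablkcipher: the underlying algorithm's key limits, IV size
 * and handlers are gathered into balg and copied into the new instance,
 * which the calling template (chainiv, eseqiv, ...) then completes with
 * its IV generation logic.
 */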
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");