crypto/skcipher.c
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

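/*
 * Deferred output copy for physical-address walks: queued on walk->buffers
 * and written back to the destination scatterlist by skcipher_walk_complete().
 */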
struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

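/*
 * Finish the current walk step: write back or unmap the data processed by
 * the caller, advance both scatterlists and either set up the next step or
 * release the walk's temporary buffers.  @err is the number of bytes left
 * unprocessed on success, or a negative error code.
 */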
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes - err;
	unsigned int nbytes;

	nbytes = walk->total - n;

	if (unlikely(err < 0)) {
		nbytes = 0;
		n = 0;
	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
					   SKCIPHER_WALK_SLOW |
					   SKCIPHER_WALK_COPY |
					   SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (WARN_ON(err)) {
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = nbytes;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

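/*
 * Flush all writes that were queued during a physical-address walk.  On
 * error the queued data is discarded; in either case the queued buffers
 * are freed.
 */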
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->chunksize);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->chunksize >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

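/*
 * Slow path: the next chunk crosses a scatterlist element or page boundary,
 * so it is assembled in an aligned bounce buffer here and copied back out in
 * skcipher_done_slow() or, for physical walks, in skcipher_walk_complete().
 */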
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		buffer = walk->buffer ?: walk->page;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

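/*
 * Copy path: source or destination is not sufficiently aligned, so the chunk
 * is staged through walk->page and the result is copied back out in
 * skcipher_walk_done() (or deferred for physical walks).
 */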
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

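/* Fast path: map source and destination and process the data in place. */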
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

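/*
 * Set up the next step of the walk: pick the largest chunk that both
 * scatterlists can supply and dispatch to the slow, copy or fast path.
 */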
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->chunksize, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_next);

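/*
 * The caller's IV does not satisfy the algorithm's alignmask; copy it into
 * a suitably aligned buffer owned by the walk.
 */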
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->chunksize;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	walk->nbytes = 0;

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;
	walk->nbytes = walk->total;

	return skcipher_walk_next(walk);
}

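/* Common walk initialisation shared by skcipher_walk_virt() and skcipher_walk_async(). */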
static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->total = req->cryptlen;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->chunksize = crypto_skcipher_chunksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

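/*
 * skcipher_walk_virt - walk req->src/req->dst by virtual address.
 *
 * Typical caller loop (a sketch only; error handling elided, and
 * nbytes_processed stands for however many bytes the caller handled):
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		... process walk.src.virt.addr into walk.dst.virt.addr ...
 *		err = skcipher_walk_done(&walk,
 *					 walk.nbytes - nbytes_processed);
 *	}
 */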
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

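/*
 * Like skcipher_walk_virt() but for AEAD requests: the associated data is
 * skipped in both scatterlists before the cryptlen bytes of payload are
 * walked.
 */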
int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	walk->total = req->cryptlen;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->chunksize = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

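/*
 * Compatibility glue: run a legacy synchronous blkcipher algorithm behind
 * the skcipher interface.
 */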
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	return 0;
}

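/*
 * Compatibility glue: run a legacy ablkcipher algorithm behind the
 * skcipher interface.
 */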
static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

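/*
 * Set up a new skcipher transform, routing legacy blkcipher and ablkcipher
 * algorithm types through the compatibility paths above.
 */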
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = alg->setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");