crypto/algif_skcipher.c
/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

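/*
 * One page-sized block of transmit scatterlist entries.  Blocks are
 * chained together on skcipher_ctx::tsgl as the sender queues more data.
 */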
struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

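/*
 * Parent transform state shared by all request sockets; has_key records
 * whether setkey() has succeeded, so keyless use can be refused.
 */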
struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;
};

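/*
 * Per-request-socket state: the transmit scatterlist queue (tsgl), the
 * IV, bookkeeping for partially filled pages (merge) and pending bytes
 * (used/more), and an embedded request for the synchronous path.
 */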
struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	size_t used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct skcipher_request req;
};

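/*
 * State for one asynchronous (AIO) recvmsg: the receive scatterlists,
 * the transmit scatterlist taken over from the context, and the crypto
 * request itself, all freed on completion.
 */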
struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	atomic_t *inflight;
	struct skcipher_request req;
};

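/*
 * Number of data entries that fit into a 4096-byte skcipher_sg_list
 * allocation, with one slot reserved for chaining to the next block.
 */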
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

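/*
 * Release every receive scatterlist attached to an async request and
 * drop the page references held by its transmit scatterlist.
 */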
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i) {
		struct page *page = sg_page(sg);

		/* some SGs may not have a page mapped */
		if (page && page_ref_count(page))
			put_page(page);
	}

	kfree(sreq->tsg);
}

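/*
 * Completion callback for async requests: free the request's resources
 * and report the result to the AIO submitter via ki_complete.
 */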
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct skcipher_async_req *sreq = req->data;
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(sreq->inflight);
	skcipher_free_async_sgls(sreq);
	kzfree(sreq);
	iocb->ki_complete(iocb, err, err);
}

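/*
 * Remaining send buffer space; the socket counts as writable once at
 * least a full page still fits.
 */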
static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}

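/*
 * Ensure the tail of ctx->tsgl has room for at least one more entry,
 * chaining a freshly allocated list onto the old one if necessary.
 */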
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				   sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg) {
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
			sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
		}

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}

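/*
 * Consume "used" bytes from the front of the transmit queue, optionally
 * dropping page references, and free any list block that becomes empty.
 */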
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

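/* Sleep until the send buffer has room, or the caller is interrupted. */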
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

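/*
 * Wake tasks sleeping on the socket wait queue once send buffer space
 * is available again.
 */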
static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

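/* Sleep until the transmit queue contains data to operate on. */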
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

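/* Wake processes blocked in recvmsg once queued data is available. */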
static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

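/*
 * Queue plaintext/ciphertext from user space.  An optional control
 * message selects the operation (encrypt/decrypt) and supplies the IV;
 * payload bytes are copied into freshly allocated pages on ctx->tsgl,
 * merging into the last partially filled page when possible.
 */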
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

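/*
 * Zero-copy variant of sendmsg: take a reference on the caller's page
 * and link it into the transmit queue directly.
 */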
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

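/*
 * Count the scatterlist entries still holding data across the whole
 * transmit queue, skipping already-drained leading entries.
 */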
static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}

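/*
 * AIO read path: move the queued transmit scatterlists into a
 * self-contained async request, map the user's receive buffers, and
 * submit the cipher operation; completion is reported via ki_complete.
 */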
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents;
	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	int err = -ENOMEM;
	bool mark = false;
	char *iv;

	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
	if (unlikely(!sreq))
		goto out;

	req = &sreq->req;
	iv = (char *)(req + 1) + reqsize;
	sreq->iocb = msg->msg_iocb;
	INIT_LIST_HEAD(&sreq->list);
	sreq->inflight = &ctx->inflight;

	lock_sock(sk);
	tx_nents = skcipher_all_sg_nents(ctx);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(ZERO_OR_NULL_PTR(sreq->tsg)))
		goto unlock;
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(iv, ctx->iv, ivsize);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      skcipher_async_cb, sreq);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;

			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp)
				goto free;

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		sreq = NULL;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	kzfree(sreq);
out:
	return err;
}

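/*
 * Synchronous read path: process the queue in place, one block-aligned
 * chunk at a time, waiting for each cipher operation to complete
 * before advancing.
 */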
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned bs = crypto_skcipher_blocksize(tfm);
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_skcipher_encrypt(&ctx->req) :
					crypto_skcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

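/* Dispatch to the async path only for genuinely asynchronous iocbs. */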
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		= PF_ALG,

	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.bind		= sock_no_bind,
	.accept		= sock_no_accept,
	.setsockopt	= sock_no_setsockopt,

	.release	= af_alg_release,
	.sendmsg	= skcipher_sendmsg,
	.sendpage	= skcipher_sendpage,
	.recvmsg	= skcipher_recvmsg,
	.poll		= skcipher_poll,
};

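/*
 * Promote a socket created through the nokey path once the parent tfm
 * has been given a key; fails with -ENOKEY otherwise.
 */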
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (!atomic_read(&ask->nokey_refcnt))
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	atomic_dec(&pask->nokey_refcnt);
	atomic_set(&ask->nokey_refcnt, 0);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		= PF_ALG,

	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.bind		= sock_no_bind,
	.accept		= sock_no_accept,
	.setsockopt	= sock_no_setsockopt,

	.release	= af_alg_release,
	.sendmsg	= skcipher_sendmsg_nokey,
	.sendpage	= skcipher_sendpage_nokey,
	.recvmsg	= skcipher_recvmsg_nokey,
	.poll		= skcipher_poll,
};

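/* Allocate the parent transform when a name is bound to the AF_ALG socket. */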
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_skcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}

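/*
 * Wait (bounded at 100 x 100ms, i.e. roughly ten seconds) for in-flight
 * async requests to drain before the socket is torn down.
 */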
static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

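/*
 * Set up the per-connection context on accept(): allocate the context
 * plus embedded request, zero the IV, and install the destructor.
 */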
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						 CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		= skcipher_bind,
	.release	= skcipher_release,
	.setkey		= skcipher_setkey,
	.accept		= skcipher_accept_parent,
	.accept_nokey	= skcipher_accept_parent_nokey,
	.ops		= &algif_skcipher_ops,
	.ops_nokey	= &algif_skcipher_ops_nokey,
	.name		= "skcipher",
	.owner		= THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");