/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
26 struct skcipher_sg_list
{
27 struct list_head list
;
31 struct scatterlist sg
[0];
35 struct crypto_ablkcipher
*skcipher
;
40 struct list_head tsgl
;
41 struct af_alg_sgl rsgl
;
45 struct af_alg_completion completion
;
54 struct ablkcipher_request req
;
/* Entries per skcipher_sg_list node so the node fits in one 4K page;
 * one slot is reserved for chaining to the next node. */
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)
60 static inline int skcipher_sndbuf(struct sock
*sk
)
62 struct alg_sock
*ask
= alg_sk(sk
);
63 struct skcipher_ctx
*ctx
= ask
->private;
65 return max_t(int, max_t(int, sk
->sk_sndbuf
& PAGE_MASK
, PAGE_SIZE
) -
69 static inline bool skcipher_writable(struct sock
*sk
)
71 return PAGE_SIZE
<= skcipher_sndbuf(sk
);
74 static int skcipher_alloc_sgl(struct sock
*sk
)
76 struct alg_sock
*ask
= alg_sk(sk
);
77 struct skcipher_ctx
*ctx
= ask
->private;
78 struct skcipher_sg_list
*sgl
;
79 struct scatterlist
*sg
= NULL
;
81 sgl
= list_entry(ctx
->tsgl
.prev
, struct skcipher_sg_list
, list
);
82 if (!list_empty(&ctx
->tsgl
))
85 if (!sg
|| sgl
->cur
>= MAX_SGL_ENTS
) {
86 sgl
= sock_kmalloc(sk
, sizeof(*sgl
) +
87 sizeof(sgl
->sg
[0]) * (MAX_SGL_ENTS
+ 1),
92 sg_init_table(sgl
->sg
, MAX_SGL_ENTS
+ 1);
96 scatterwalk_sg_chain(sg
, MAX_SGL_ENTS
+ 1, sgl
->sg
);
97 sg_unmark_end(sg
+ (MAX_SGL_ENTS
- 1));
100 list_add_tail(&sgl
->list
, &ctx
->tsgl
);
106 static void skcipher_pull_sgl(struct sock
*sk
, int used
)
108 struct alg_sock
*ask
= alg_sk(sk
);
109 struct skcipher_ctx
*ctx
= ask
->private;
110 struct skcipher_sg_list
*sgl
;
111 struct scatterlist
*sg
;
114 while (!list_empty(&ctx
->tsgl
)) {
115 sgl
= list_first_entry(&ctx
->tsgl
, struct skcipher_sg_list
,
119 for (i
= 0; i
< sgl
->cur
; i
++) {
120 int plen
= min_t(int, used
, sg
[i
].length
);
122 if (!sg_page(sg
+ i
))
125 sg
[i
].length
-= plen
;
126 sg
[i
].offset
+= plen
;
134 put_page(sg_page(sg
+ i
));
135 sg_assign_page(sg
+ i
, NULL
);
138 list_del(&sgl
->list
);
139 sock_kfree_s(sk
, sgl
,
140 sizeof(*sgl
) + sizeof(sgl
->sg
[0]) *
148 static void skcipher_free_sgl(struct sock
*sk
)
150 struct alg_sock
*ask
= alg_sk(sk
);
151 struct skcipher_ctx
*ctx
= ask
->private;
153 skcipher_pull_sgl(sk
, ctx
->used
);
156 static int skcipher_wait_for_wmem(struct sock
*sk
, unsigned flags
)
160 int err
= -ERESTARTSYS
;
162 if (flags
& MSG_DONTWAIT
)
165 set_bit(SOCK_ASYNC_NOSPACE
, &sk
->sk_socket
->flags
);
168 if (signal_pending(current
))
170 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
171 timeout
= MAX_SCHEDULE_TIMEOUT
;
172 if (sk_wait_event(sk
, &timeout
, skcipher_writable(sk
))) {
177 finish_wait(sk_sleep(sk
), &wait
);
182 static void skcipher_wmem_wakeup(struct sock
*sk
)
184 struct socket_wq
*wq
;
186 if (!skcipher_writable(sk
))
190 wq
= rcu_dereference(sk
->sk_wq
);
191 if (wq_has_sleeper(wq
))
192 wake_up_interruptible_sync_poll(&wq
->wait
, POLLIN
|
195 sk_wake_async(sk
, SOCK_WAKE_WAITD
, POLL_IN
);
199 static int skcipher_wait_for_data(struct sock
*sk
, unsigned flags
)
201 struct alg_sock
*ask
= alg_sk(sk
);
202 struct skcipher_ctx
*ctx
= ask
->private;
205 int err
= -ERESTARTSYS
;
207 if (flags
& MSG_DONTWAIT
) {
211 set_bit(SOCK_ASYNC_WAITDATA
, &sk
->sk_socket
->flags
);
214 if (signal_pending(current
))
216 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
217 timeout
= MAX_SCHEDULE_TIMEOUT
;
218 if (sk_wait_event(sk
, &timeout
, ctx
->used
)) {
223 finish_wait(sk_sleep(sk
), &wait
);
225 clear_bit(SOCK_ASYNC_WAITDATA
, &sk
->sk_socket
->flags
);
230 static void skcipher_data_wakeup(struct sock
*sk
)
232 struct alg_sock
*ask
= alg_sk(sk
);
233 struct skcipher_ctx
*ctx
= ask
->private;
234 struct socket_wq
*wq
;
240 wq
= rcu_dereference(sk
->sk_wq
);
241 if (wq_has_sleeper(wq
))
242 wake_up_interruptible_sync_poll(&wq
->wait
, POLLOUT
|
245 sk_wake_async(sk
, SOCK_WAKE_SPACE
, POLL_OUT
);
249 static int skcipher_sendmsg(struct kiocb
*unused
, struct socket
*sock
,
250 struct msghdr
*msg
, size_t size
)
252 struct sock
*sk
= sock
->sk
;
253 struct alg_sock
*ask
= alg_sk(sk
);
254 struct skcipher_ctx
*ctx
= ask
->private;
255 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(&ctx
->req
);
256 unsigned ivsize
= crypto_ablkcipher_ivsize(tfm
);
257 struct skcipher_sg_list
*sgl
;
258 struct af_alg_control con
= {};
264 if (msg
->msg_controllen
) {
265 err
= af_alg_cmsg_send(msg
, &con
);
280 if (con
.iv
&& con
.iv
->ivlen
!= ivsize
)
287 if (!ctx
->more
&& ctx
->used
)
293 memcpy(ctx
->iv
, con
.iv
->iv
, ivsize
);
297 struct scatterlist
*sg
;
298 unsigned long len
= size
;
302 sgl
= list_entry(ctx
->tsgl
.prev
,
303 struct skcipher_sg_list
, list
);
304 sg
= sgl
->sg
+ sgl
->cur
- 1;
305 len
= min_t(unsigned long, len
,
306 PAGE_SIZE
- sg
->offset
- sg
->length
);
308 err
= memcpy_fromiovec(page_address(sg_page(sg
)) +
309 sg
->offset
+ sg
->length
,
315 ctx
->merge
= (sg
->offset
+ sg
->length
) &
324 if (!skcipher_writable(sk
)) {
325 err
= skcipher_wait_for_wmem(sk
, msg
->msg_flags
);
330 len
= min_t(unsigned long, len
, skcipher_sndbuf(sk
));
332 err
= skcipher_alloc_sgl(sk
);
336 sgl
= list_entry(ctx
->tsgl
.prev
, struct skcipher_sg_list
, list
);
340 plen
= min_t(int, len
, PAGE_SIZE
);
342 sg_assign_page(sg
+ i
, alloc_page(GFP_KERNEL
));
344 if (!sg_page(sg
+ i
))
347 err
= memcpy_fromiovec(page_address(sg_page(sg
+ i
)),
350 __free_page(sg_page(sg
+ i
));
351 sg_assign_page(sg
+ i
, NULL
);
361 } while (len
&& sgl
->cur
< MAX_SGL_ENTS
);
363 ctx
->merge
= plen
& (PAGE_SIZE
- 1);
368 ctx
->more
= msg
->msg_flags
& MSG_MORE
;
369 if (!ctx
->more
&& !list_empty(&ctx
->tsgl
))
370 sgl
= list_entry(ctx
->tsgl
.prev
, struct skcipher_sg_list
, list
);
373 skcipher_data_wakeup(sk
);
376 return copied
?: err
;
379 static ssize_t
skcipher_sendpage(struct socket
*sock
, struct page
*page
,
380 int offset
, size_t size
, int flags
)
382 struct sock
*sk
= sock
->sk
;
383 struct alg_sock
*ask
= alg_sk(sk
);
384 struct skcipher_ctx
*ctx
= ask
->private;
385 struct skcipher_sg_list
*sgl
;
388 if (flags
& MSG_SENDPAGE_NOTLAST
)
392 if (!ctx
->more
&& ctx
->used
)
398 if (!skcipher_writable(sk
)) {
399 err
= skcipher_wait_for_wmem(sk
, flags
);
404 err
= skcipher_alloc_sgl(sk
);
409 sgl
= list_entry(ctx
->tsgl
.prev
, struct skcipher_sg_list
, list
);
412 sg_set_page(sgl
->sg
+ sgl
->cur
, page
, size
, offset
);
417 ctx
->more
= flags
& MSG_MORE
;
418 if (!ctx
->more
&& !list_empty(&ctx
->tsgl
))
419 sgl
= list_entry(ctx
->tsgl
.prev
, struct skcipher_sg_list
, list
);
422 skcipher_data_wakeup(sk
);
428 static int skcipher_recvmsg(struct kiocb
*unused
, struct socket
*sock
,
429 struct msghdr
*msg
, size_t ignored
, int flags
)
431 struct sock
*sk
= sock
->sk
;
432 struct alg_sock
*ask
= alg_sk(sk
);
433 struct skcipher_ctx
*ctx
= ask
->private;
434 unsigned bs
= crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
436 struct skcipher_sg_list
*sgl
;
437 struct scatterlist
*sg
;
438 unsigned long iovlen
;
445 for (iov
= msg
->msg_iov
, iovlen
= msg
->msg_iovlen
; iovlen
> 0;
447 unsigned long seglen
= iov
->iov_len
;
448 char __user
*from
= iov
->iov_base
;
453 err
= skcipher_wait_for_data(sk
, flags
);
458 used
= min_t(unsigned long, used
, seglen
);
460 used
= af_alg_make_sg(&ctx
->rsgl
, from
, used
, 1);
465 if (ctx
->more
|| used
< ctx
->used
)
472 sgl
= list_first_entry(&ctx
->tsgl
,
473 struct skcipher_sg_list
, list
);
479 ablkcipher_request_set_crypt(&ctx
->req
, sg
,
483 err
= af_alg_wait_for_completion(
485 crypto_ablkcipher_encrypt(&ctx
->req
) :
486 crypto_ablkcipher_decrypt(&ctx
->req
),
490 af_alg_free_sg(&ctx
->rsgl
);
498 skcipher_pull_sgl(sk
, used
);
505 skcipher_wmem_wakeup(sk
);
508 return copied
?: err
;
512 static unsigned int skcipher_poll(struct file
*file
, struct socket
*sock
,
515 struct sock
*sk
= sock
->sk
;
516 struct alg_sock
*ask
= alg_sk(sk
);
517 struct skcipher_ctx
*ctx
= ask
->private;
520 sock_poll_wait(file
, sk_sleep(sk
), wait
);
524 mask
|= POLLIN
| POLLRDNORM
;
526 if (skcipher_writable(sk
))
527 mask
|= POLLOUT
| POLLWRNORM
| POLLWRBAND
;
532 static struct proto_ops algif_skcipher_ops
= {
535 .connect
= sock_no_connect
,
536 .socketpair
= sock_no_socketpair
,
537 .getname
= sock_no_getname
,
538 .ioctl
= sock_no_ioctl
,
539 .listen
= sock_no_listen
,
540 .shutdown
= sock_no_shutdown
,
541 .getsockopt
= sock_no_getsockopt
,
542 .mmap
= sock_no_mmap
,
543 .bind
= sock_no_bind
,
544 .accept
= sock_no_accept
,
545 .setsockopt
= sock_no_setsockopt
,
547 .release
= af_alg_release
,
548 .sendmsg
= skcipher_sendmsg
,
549 .sendpage
= skcipher_sendpage
,
550 .recvmsg
= skcipher_recvmsg
,
551 .poll
= skcipher_poll
,
554 static int skcipher_check_key(struct socket
*sock
)
558 struct alg_sock
*pask
;
559 struct skcipher_tfm
*tfm
;
560 struct sock
*sk
= sock
->sk
;
561 struct alg_sock
*ask
= alg_sk(sk
);
568 pask
= alg_sk(ask
->parent
);
572 lock_sock_nested(psk
, SINGLE_DEPTH_NESTING
);
592 static int skcipher_sendmsg_nokey(struct kiocb
*unused
, struct socket
*sock
,
593 struct msghdr
*msg
, size_t size
)
597 err
= skcipher_check_key(sock
);
601 return skcipher_sendmsg(unused
, sock
, msg
, size
);
604 static ssize_t
skcipher_sendpage_nokey(struct socket
*sock
, struct page
*page
,
605 int offset
, size_t size
, int flags
)
609 err
= skcipher_check_key(sock
);
613 return skcipher_sendpage(sock
, page
, offset
, size
, flags
);
616 static int skcipher_recvmsg_nokey(struct kiocb
*unused
, struct socket
*sock
,
617 struct msghdr
*msg
, size_t ignored
, int flags
)
621 err
= skcipher_check_key(sock
);
625 return skcipher_recvmsg(unused
, sock
, msg
, ignored
, flags
);
628 static struct proto_ops algif_skcipher_ops_nokey
= {
631 .connect
= sock_no_connect
,
632 .socketpair
= sock_no_socketpair
,
633 .getname
= sock_no_getname
,
634 .ioctl
= sock_no_ioctl
,
635 .listen
= sock_no_listen
,
636 .shutdown
= sock_no_shutdown
,
637 .getsockopt
= sock_no_getsockopt
,
638 .mmap
= sock_no_mmap
,
639 .bind
= sock_no_bind
,
640 .accept
= sock_no_accept
,
641 .setsockopt
= sock_no_setsockopt
,
643 .release
= af_alg_release
,
644 .sendmsg
= skcipher_sendmsg_nokey
,
645 .sendpage
= skcipher_sendpage_nokey
,
646 .recvmsg
= skcipher_recvmsg_nokey
,
647 .poll
= skcipher_poll
,
650 static void *skcipher_bind(const char *name
, u32 type
, u32 mask
)
652 struct skcipher_tfm
*tfm
;
653 struct crypto_ablkcipher
*skcipher
;
655 tfm
= kzalloc(sizeof(*tfm
), GFP_KERNEL
);
657 return ERR_PTR(-ENOMEM
);
659 skcipher
= crypto_alloc_ablkcipher(name
, type
, mask
);
660 if (IS_ERR(skcipher
)) {
662 return ERR_CAST(skcipher
);
665 tfm
->skcipher
= skcipher
;
670 static void skcipher_release(void *private)
672 struct skcipher_tfm
*tfm
= private;
674 crypto_free_ablkcipher(tfm
->skcipher
);
678 static int skcipher_setkey(void *private, const u8
*key
, unsigned int keylen
)
680 struct skcipher_tfm
*tfm
= private;
683 err
= crypto_ablkcipher_setkey(tfm
->skcipher
, key
, keylen
);
689 static void skcipher_sock_destruct(struct sock
*sk
)
691 struct alg_sock
*ask
= alg_sk(sk
);
692 struct skcipher_ctx
*ctx
= ask
->private;
693 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(&ctx
->req
);
695 skcipher_free_sgl(sk
);
696 sock_kfree_s(sk
, ctx
->iv
, crypto_ablkcipher_ivsize(tfm
));
697 sock_kfree_s(sk
, ctx
, ctx
->len
);
698 af_alg_release_parent(sk
);
701 static int skcipher_accept_parent_nokey(void *private, struct sock
*sk
)
703 struct skcipher_ctx
*ctx
;
704 struct alg_sock
*ask
= alg_sk(sk
);
705 struct skcipher_tfm
*tfm
= private;
706 struct crypto_ablkcipher
*skcipher
= tfm
->skcipher
;
707 unsigned int len
= sizeof(*ctx
) + crypto_ablkcipher_reqsize(skcipher
);
709 ctx
= sock_kmalloc(sk
, len
, GFP_KERNEL
);
712 ctx
->iv
= sock_kmalloc(sk
, crypto_ablkcipher_ivsize(skcipher
),
715 sock_kfree_s(sk
, ctx
, len
);
719 memset(ctx
->iv
, 0, crypto_ablkcipher_ivsize(skcipher
));
721 INIT_LIST_HEAD(&ctx
->tsgl
);
727 af_alg_init_completion(&ctx
->completion
);
731 ablkcipher_request_set_tfm(&ctx
->req
, skcipher
);
732 ablkcipher_request_set_callback(&ctx
->req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
733 af_alg_complete
, &ctx
->completion
);
735 sk
->sk_destruct
= skcipher_sock_destruct
;
740 static int skcipher_accept_parent(void *private, struct sock
*sk
)
742 struct skcipher_tfm
*tfm
= private;
744 if (!tfm
->has_key
&& crypto_ablkcipher_has_setkey(tfm
->skcipher
))
747 return skcipher_accept_parent_nokey(private, sk
);
750 static const struct af_alg_type algif_type_skcipher
= {
751 .bind
= skcipher_bind
,
752 .release
= skcipher_release
,
753 .setkey
= skcipher_setkey
,
754 .accept
= skcipher_accept_parent
,
755 .accept_nokey
= skcipher_accept_parent_nokey
,
756 .ops
= &algif_skcipher_ops
,
757 .ops_nokey
= &algif_skcipher_ops_nokey
,
762 static int __init
algif_skcipher_init(void)
764 return af_alg_register_type(&algif_type_skcipher
);
767 static void __exit
algif_skcipher_exit(void)
769 int err
= af_alg_unregister_type(&algif_type_skcipher
);
773 module_init(algif_skcipher_init
);
774 module_exit(algif_skcipher_exit
);
775 MODULE_LICENSE("GPL");