net/ipv6/esp6.c
/*
 * Copyright (C) 2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

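/*
 * Per-packet scratch state.  The temporary AEAD request buffer is
 * stashed in the skb control block so the asynchronous completion
 * callbacks can find it and free it.
 */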
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

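/*
 * The helpers below carve the seqhi, IV, request and scatterlist
 * regions out of a buffer sized by esp_alloc_tmp(), applying the same
 * alignment rules that were used to compute its length.
 */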
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

135
136 static void esp_output_done(struct crypto_async_request *base, int err)
137 {
138 struct sk_buff *skb = base->data;
139
140 kfree(ESP_SKB_CB(skb)->tmp);
141 xfrm_output_resume(skb, err);
142 }
143
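/*
 * Output path: grow the trailer and fill in the ESP trailer fields
 * (optional TFC padding, the self-describing pad bytes 1, 2, 3, ...,
 * the pad length and the next-header byte), then encrypt payload and
 * trailer in place.  The givcrypt interface also generates the IV,
 * seeded with the low 32 bits of the output sequence number.
 */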
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct sk_buff *trailer;
	void *tmp;
	int blksize;
	int clen;
	int alen;
	int plen;
	int tfclen;
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	u8 *iv;
	u8 *tail;
	__be32 *seqhi;
	struct esp_data *esp = x->data;

	/* skb is pure payload to encrypt */
	err = -ENOMEM;

	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);

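	/*
	 * With extended sequence numbers the associated data is the
	 * (SPI, seq-hi, seq-lo) triple, gathered from three separate
	 * buffers; otherwise it is just the plain ESP header.
	 */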
	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}

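/*
 * Post-decryption processing: strip the ICV and the ESP padding
 * (rejecting packets whose pad length is inconsistent) and hand the
 * inner next-header value back to the xfrm input path.
 */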
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

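/*
 * Input path: sanity-check the packet length, make the data writable,
 * map it into a scatterlist and kick off a (possibly asynchronous)
 * AEAD decryption; esp_input_done()/esp_input_done2() finish the job.
 */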
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = -ENOMEM;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong, check against another impls. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}

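/*
 * Effective payload MTU: subtract the ESP header and ICV overhead,
 * round down to the (4-byte aligned) cipher block size and reserve
 * the two trailer bytes for the pad length and next header.
 */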
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	u32 rem;

	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	if (x->props.mode != XFRM_MODE_TUNNEL) {
		u32 padsize = ((blksize - 1) & 7) + 1;
		mtu -= blksize - padsize;
		mtu += min_t(u32, blksize - padsize, rem);
	}

	return mtu - 2;
}

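/*
 * ICMPv6 error handler.  Only messages relevant to path MTU discovery
 * are of interest, and only if they match a known SA by destination
 * address and SPI.
 */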
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		     u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_DEST_UNREACH &&
	    type != ICMPV6_PKT_TOOBIG)
		return;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return;
	pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
		 ntohl(esph->spi), &iph->daddr);
	xfrm_state_put(x);
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}

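/*
 * Configure a genuine AEAD algorithm (e.g. an rfc4106 GCM combination)
 * directly from the single key and ICV length supplied by userspace.
 */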
static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

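/*
 * Compose an authenc(auth,enc) instance (authencesn() when extended
 * sequence numbers are in use) from separate authentication and
 * encryption algorithms.  Both keys are packed into one blob, led by
 * an rtattr carrying the encryption key length, which is the key
 * format the authenc template expects.
 */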
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

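/*
 * State setup: instantiate the AEAD transform and precompute the
 * per-mode header length and the worst-case trailer length
 * (padding + pad length + next header + ICV).
 */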
static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = esp->aead;

	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		/* err is still 0 here; fail instead of reporting success */
		err = -EINVAL;
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}

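/*
 * Registration glue: hook the ESP type into the IPv6 xfrm layer and
 * claim IPPROTO_ESP from the IPv6 protocol table.
 */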
static const struct xfrm_type esp6_type =
{
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static const struct inet6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.err_handler	= esp6_err,
	.flags		= INET6_PROTO_NOPOLICY,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);