/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>

/*
 * Algorithms supported by IPsec. These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "hmac(digest_null)",
	.compat = "digest_null",

	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "hmac(md5)",
	.compat = "md5",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "hmac(sha1)",
	.compat = "sha1",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "hmac(sha256)",
	.compat = "sha256",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "hmac(ripemd160)",
	.compat = "ripemd160",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};

static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "ecb(cipher_null)",
	.compat = "cipher_null",

	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "cbc(des)",
	.compat = "des",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 64,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "cbc(des3_ede)",
	.compat = "des3_ede",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 192,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cbc(cast128)",
	.compat = "cast128",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "cbc(blowfish)",
	.compat = "blowfish",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "cbc(aes)",
	.compat = "aes",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(serpent)",
	.compat = "serpent",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256,
	}
},
{
	.name = "cbc(twofish)",
	.compat = "twofish",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};

static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};

static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}

/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
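
/*
 * Usage sketch (illustrative, not part of the original file): a pfkey
 * caller resolving an authentication algorithm by its SADB identifier.
 * The lookup only succeeds for entries already marked available, e.g.
 * by an earlier xfrm_probe_algs() call:
 *
 *	struct xfrm_algo_desc *aalg = xfrm_aalg_get_byid(SADB_AALG_SHA1HMAC);
 *
 *	if (aalg)
 *		pr_debug("%s: ICV %u of %u bits\n", aalg->name,
 *			 aalg->uinfo.auth.icv_truncbits,
 *			 aalg->uinfo.auth.icv_fullbits);
 */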

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);

static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, u32 type, u32 mask,
					      char *name, int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name) &&
		    (!list[i].compat || strcmp(name, list[i].compat)))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(),
			       CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(),
			       CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(),
			       CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
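
/*
 * Usage sketch (illustrative, not part of the original file): resolving
 * an algorithm by name. With probe != 0 the helper asks the crypto layer
 * whether the transform can actually be instantiated, and caches the
 * answer in the entry's 'available' flag. Both the template name and the
 * older compat spelling are accepted:
 *
 *	struct xfrm_algo_desc *ealg;
 *
 *	ealg = xfrm_ealg_get_byname("cbc(aes)", 1);
 *	if (!ealg)
 *		ealg = xfrm_ealg_get_byname("aes", 1);
 *
 * The second call uses the compat spelling and resolves to the same entry.
 */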

struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);

/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system. This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_has_hash(aalg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_has_blkcipher(ealg_list[i].name, 0,
					      CRYPTO_ALG_ASYNC);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_has_comp(calg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);

int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
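
/*
 * Usage sketch (illustrative, not part of the original file): a pfkey
 * register path refreshes the availability flags first, then sizes its
 * supported-algorithm message extensions from the counts (auth_len and
 * enc_len are hypothetical local names):
 *
 *	xfrm_probe_algs();
 *	auth_len = sizeof(struct sadb_supported) +
 *		   xfrm_count_auth_supported() * sizeof(struct sadb_alg);
 *	enc_len  = sizeof(struct sadb_supported) +
 *		   xfrm_count_enc_supported() * sizeof(struct sadb_alg);
 */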

/* TODO: move this to a common area; it is shared with AH. */

int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
		 int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int err;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		err = icv_update(desc, &sg, copy);
		if (unlikely(err))
			return err;

		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			err = icv_update(desc, &sg, copy);
			if (unlikely(err))
				return err;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				err = skb_icv_walk(list, desc, offset-start,
						   copy, icv_update);
				if (unlikely(err))
					return err;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
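
/*
 * Usage sketch (illustrative, not part of the original file): AH-style
 * ICV computation over a whole packet. In this kernel generation
 * icv_update_fn_t matches the signature of crypto_hash_update(), so the
 * hash can be driven directly by the walker (tfm and icv are assumed to
 * be set up by the caller):
 *
 *	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
 *	int err = crypto_hash_init(&desc);
 *
 *	if (!err)
 *		err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update);
 *	if (!err)
 *		err = crypto_hash_final(&desc, icv);
 */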

#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although this looks generic, it is not used anywhere else. */

int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
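
/*
 * Usage sketch (illustrative, not part of the original file): ESP builds
 * a scatterlist over the encrypted payload before invoking the cipher.
 * skb_cow_data() (below) returns an upper bound on the number of entries
 * needed (esph_offset and elen are hypothetical names for the payload
 * offset and length):
 *
 *	nsg = skb_cow_data(skb, 0, &trailer);
 *	if (nsg < 0)
 *		goto error;
 *	sg = kmalloc(sizeof(*sg) * nsg, GFP_ATOMIC);
 *	if (!sg)
 *		goto error;
 *	nsg = skb_to_sgvec(skb, sg, esph_offset, elen);
 */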

/* Check that the skb data bits are writable. If they are not, copy the
 * data to a newly created private area. If "tailbits" is given, make sure
 * that tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for the subsequent
 * transformations and a pointer to the writable trailer skb.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate the head,
	 * pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. On a miss, reallocate and reserve even more
		 * space; 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* The hard case: walk the fragment list, copying whatever is not
	 * safely writable in place. */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment may have been partially pulled by someone;
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last one, worry about the trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* No way around it: this fragment must be copied. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Link the new skb in and drop the old one. */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
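
/*
 * Usage sketch (illustrative, not part of the original file): ESP output
 * appends its trailer (padding, pad length, next header) to the writable
 * tail skb located by skb_cow_data(), letting pskb_put() keep the overall
 * packet length consistent (trailer_len is a hypothetical name):
 *
 *	if (skb_cow_data(skb, trailer_len, &trailer) < 0)
 *		goto error;
 *	tail = pskb_put(skb, trailer, trailer_len);
 *	... fill tail[0..trailer_len-1] with the padding and trailer bytes ...
 */
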
#endif