[NET]: Convert xtime.tv_sec to get_seconds()
net/xfrm/xfrm_policy.c
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <linux/audit.h>

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_count);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);

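/* A policy selector matches a flow when every populated field agrees:
 * addresses are compared under their prefix lengths, ports under their
 * masks, and a zero proto or ifindex acts as a wildcard.  For example,
 * a selector with prefixlen_d == 0 and dport_mask == 0 matches any
 * destination address and any destination port.
 */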
static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return	addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return	addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}

int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}

int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == NULL)) {
		modemap[mode->encap] = mode;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return mode;
}

void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}

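/* Convert a lifetime expressed in seconds into a timer interval in
 * jiffies, clamping so the multiplication cannot exceed the range the
 * scheduler accepts; e.g. with HZ == 1000 on a 32-bit box, anything
 * beyond roughly 2^21 seconds is capped at MAX_SCHEDULE_TIMEOUT-1.
 */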
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

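/* Per-policy lifetime timer.  A hard add/use expiry deletes the policy
 * and notifies key managers; a soft expiry only warns via
 * km_policy_expired() and re-checks after XFRM_KM_TIMEOUT.  The timer
 * re-arms itself for the nearest pending deadline.
 */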
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}


/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

struct xfrm_policy_hash {
	struct hlist_head	*table;
	unsigned int		hmask;
};

static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
static struct hlist_head *xfrm_policy_byidx __read_mostly;
static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(u32 index)
{
	return __idx_hash(index, xfrm_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&xfrm_policy_inexact[dir] :
		xfrm_policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return xfrm_policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		hlist_add_head(&pol->bydst, ndsttable+h);
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

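/* Grow a table by doubling the bucket count; e.g. an old mask of
 * 7 (8 buckets) becomes 15 (16 buckets).
 */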
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = xfrm_policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	xfrm_policy_bydst[dir].table = ndst;
	xfrm_policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = xfrm_policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	xfrm_policy_byidx = nidx;
	xfrm_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

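/* Resize once the policy count exceeds the hash mask (i.e. the load
 * factor reaches one), bounded above by xfrm_policy_hashmax.
 */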
static inline int xfrm_bydst_should_resize(int dir, int *total)
{
	unsigned int cnt = xfrm_policy_count[dir];
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *__unused)
{
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(dir, &total))
			xfrm_bydst_resize(dir);
	}
	if (xfrm_byidx_should_resize(total))
		xfrm_byidx_resize(total);

	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Generate new index... KAME seems to generate them ordered by cost
 * at the price of absolute unpredictability of rule ordering. This will not pass. */
static u32 xfrm_gen_index(u8 type, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = xfrm_policy_byidx + idx_hash(idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

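/* Insert a policy into its hash chain ordered by decreasing priority.
 * An entry with the same type, selector and security context replaces
 * the old one (inheriting its index) unless the caller asked for
 * exclusive insertion, in which case -EEXIST is returned.  Bundles of
 * the policies that follow the new entry in the chain are flushed.
 */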
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(&policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	xfrm_policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		xfrm_policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
	hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = xfrm_policy_byidx + idx_hash(id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
{
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_log(audit_info->loginuid, audit_info->secid,
				       AUDIT_MAC_IPSEC_DELSPD, 1, pol, NULL);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_log(audit_info->loginuid,
					       audit_info->secid,
					       AUDIT_MAC_IPSEC_DELSPD, 1,
					       pol, NULL);

				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		xfrm_policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol, *last = NULL;
	struct hlist_node *entry;
	int dir, last_dir = 0, count, error;

	read_lock_bh(&xfrm_policy_lock);
	count = 0;

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		struct hlist_head *table = xfrm_policy_bydst[dir].table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			if (last) {
				error = func(last, last_dir % XFRM_POLICY_MAX,
					     count, data);
				if (error)
					goto out;
			}
			last = pol;
			last_dir = dir;
			count++;
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst) {
				if (pol->type != type)
					continue;
				if (last) {
					error = func(last, last_dir % XFRM_POLICY_MAX,
						     count, data);
					if (error)
						goto out;
				}
				last = pol;
				last_dir = dir;
				count++;
			}
		}
	}
	if (count == 0) {
		error = -ENOENT;
		goto out;
	}
	error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol, fl->secid, dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			      void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

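/* XFRM_POLICY_* and FLOW_DIR_* normally share values, so the first
 * test folds to a compile-time constant and the switch is optimized
 * away; the fallback switch only matters if the two enums ever diverge.
 */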
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol, fl->secid,
							  policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct hlist_head *chain = policy_hash_bysel(&pol->selector,
						     pol->family, dir);

	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
	xfrm_policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	xfrm_policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

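/* Duplicate a per-socket policy.  Called from __xfrm_sk_clone_policy()
 * below so that a cloned socket gets its own reference-counted copies
 * rather than sharing the parent's entries.
 */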
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(&tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;

}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}


static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[1]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		if (IS_ERR(policy))
			return PTR_ERR(policy);
	}

	if (!policy) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !xfrm_policy_count[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		if (IS_ERR(policy))
			return PTR_ERR(policy);
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = get_seconds();
	pols[0] = policy;
	npols ++;
	xfrm_nr += pols[0]->xfrm_nr;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					err = -EPERM;
					goto error;
				}
				npols ++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Neither the flowi nor the bundle carries the size of
		 * the transformation template list, so when more than
		 * one policy is in use we only know whether all of them
		 * are bypass after every one has been looked up.  Note
		 * that the corresponding not-transformed bypass above is
		 * likewise guarded by the non-sub-policy configuration.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;
	int err;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	xfrm_state_hold(x);
	err = x->type->reject(x, skb, fl);
	xfrm_state_put(x);
	return err;
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		((tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * Zero or a positive value is returned when validation succeeds (either
 * a bypass because of an optional transport-mode template, or the next
 * index of the secpath state matched against the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	int xerr_idx = -1;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol))
			return 0;
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol))
		return 0;

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols ++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1]))
				return 0;
			pols[1]->curlft.use_time = get_seconds();
			npols ++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW)
				goto reject;
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
				goto reject_error;
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx))
			goto reject;

		xfrm_pols_put(pols, npols);
		return 1;
	}

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst=*dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = xfrm_policy_bydst[dir].table;
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

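/* Initialize the path MTU at every level of a freshly created bundle:
 * each level caches its child and route MTUs and sets its own metric
 * to the smaller of the transform-adjusted child MTU and the route
 * MTU, so tunnel overhead is accounted for at every nesting level.
 */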
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

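/* With strict != 0, non-tunnel states must additionally pass the
 * address/flow check.  A NULL fl (as used by stale_bundle()) skips the
 * selector and security matches and only validates freshness; changed
 * MTUs are re-propagated through the affected levels before returning.
 */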
1926 int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
1927 struct flowi *fl, int family, int strict)
1928 {
1929 struct dst_entry *dst = &first->u.dst;
1930 struct xfrm_dst *last;
1931 u32 mtu;
1932
1933 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
1934 (dst->dev && !netif_running(dst->dev)))
1935 return 0;
1936
1937 last = NULL;
1938
1939 do {
1940 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1941
1942 if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
1943 return 0;
1944 if (fl && pol &&
1945 !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
1946 return 0;
1947 if (dst->xfrm->km.state != XFRM_STATE_VALID)
1948 return 0;
1949 if (xdst->genid != dst->xfrm->genid)
1950 return 0;
1951
1952 if (strict && fl && dst->xfrm->props.mode != XFRM_MODE_TUNNEL &&
1953 !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
1954 return 0;
1955
1956 mtu = dst_mtu(dst->child);
1957 if (xdst->child_mtu_cached != mtu) {
1958 last = xdst;
1959 xdst->child_mtu_cached = mtu;
1960 }
1961
1962 if (!dst_check(xdst->route, xdst->route_cookie))
1963 return 0;
1964 mtu = dst_mtu(xdst->route);
1965 if (xdst->route_mtu_cached != mtu) {
1966 last = xdst;
1967 xdst->route_mtu_cached = mtu;
1968 }
1969
1970 dst = dst->child;
1971 } while (dst->xfrm);
1972
1973 if (likely(!last))
1974 return 1;
1975
1976 mtu = last->child_mtu_cached;
1977 for (;;) {
1978 dst = &last->u.dst;
1979
1980 mtu = xfrm_state_mtu(dst->xfrm, mtu);
1981 if (mtu > last->route_mtu_cached)
1982 mtu = last->route_mtu_cached;
1983 dst->metrics[RTAX_MTU-1] = mtu;
1984
1985 if (last == first)
1986 break;
1987
1988 last = last->u.next;
1989 last->child_mtu_cached = mtu;
1990 }
1991
1992 return 1;
1993 }
1994
1995 EXPORT_SYMBOL(xfrm_bundle_ok);
1996
1997 #ifdef CONFIG_AUDITSYSCALL
1998 /* Audit addition and deletion of SAs and ipsec policy */
1999
2000 void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
2001 struct xfrm_policy *xp, struct xfrm_state *x)
2002 {
2003
2004 char *secctx;
2005 u32 secctx_len;
2006 struct xfrm_sec_ctx *sctx = NULL;
2007 struct audit_buffer *audit_buf;
2008 int family;
2009 extern int audit_enabled;
2010
2011 if (audit_enabled == 0)
2012 return;
2013
2014 BUG_ON((type == AUDIT_MAC_IPSEC_ADDSA ||
2015 type == AUDIT_MAC_IPSEC_DELSA) && !x);
2016 BUG_ON((type == AUDIT_MAC_IPSEC_ADDSPD ||
2017 type == AUDIT_MAC_IPSEC_DELSPD) && !xp);
2018
2019 audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
2020 if (audit_buf == NULL)
2021 return;
2022
2023 switch(type) {
2024 case AUDIT_MAC_IPSEC_ADDSA:
2025 audit_log_format(audit_buf, "SAD add: auid=%u", auid);
2026 break;
2027 case AUDIT_MAC_IPSEC_DELSA:
2028 audit_log_format(audit_buf, "SAD delete: auid=%u", auid);
2029 break;
2030 case AUDIT_MAC_IPSEC_ADDSPD:
2031 audit_log_format(audit_buf, "SPD add: auid=%u", auid);
2032 break;
2033 case AUDIT_MAC_IPSEC_DELSPD:
2034 audit_log_format(audit_buf, "SPD delete: auid=%u", auid);
2035 break;
2036 default:
2037 return;
2038 }
2039
2040 if (sid != 0 &&
2041 security_secid_to_secctx(sid, &secctx, &secctx_len) == 0)
2042 audit_log_format(audit_buf, " subj=%s", secctx);
2043 else
2044 audit_log_task_context(audit_buf);
2045
2046 if (xp) {
2047 family = xp->selector.family;
2048 if (xp->security)
2049 sctx = xp->security;
2050 } else {
2051 family = x->props.family;
2052 if (x->security)
2053 sctx = x->security;
2054 }
2055
2056 if (sctx)
2057 audit_log_format(audit_buf,
2058 " sec_alg=%u sec_doi=%u sec_obj=%s",
2059 sctx->ctx_alg, sctx->ctx_doi, sctx->ctx_str);
2060
2061 switch(family) {
2062 case AF_INET:
2063 {
2064 struct in_addr saddr, daddr;
2065 if (xp) {
2066 saddr.s_addr = xp->selector.saddr.a4;
2067 daddr.s_addr = xp->selector.daddr.a4;
2068 } else {
2069 saddr.s_addr = x->props.saddr.a4;
2070 daddr.s_addr = x->id.daddr.a4;
2071 }
2072 audit_log_format(audit_buf,
2073 " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
2074 NIPQUAD(saddr), NIPQUAD(daddr));
2075 }
2076 break;
2077 case AF_INET6:
2078 {
2079 struct in6_addr saddr6, daddr6;
2080 if (xp) {
2081 memcpy(&saddr6, xp->selector.saddr.a6,
2082 sizeof(struct in6_addr));
2083 memcpy(&daddr6, xp->selector.daddr.a6,
2084 sizeof(struct in6_addr));
2085 } else {
2086 memcpy(&saddr6, x->props.saddr.a6,
2087 sizeof(struct in6_addr));
2088 memcpy(&daddr6, x->id.daddr.a6,
2089 sizeof(struct in6_addr));
2090 }
2091 audit_log_format(audit_buf,
2092 " src=" NIP6_FMT " dst=" NIP6_FMT,
2093 NIP6(saddr6), NIP6(daddr6));
2094 }
2095 break;
2096 }
2097
2098 if (x)
2099 audit_log_format(audit_buf, " spi=%lu(0x%lx) protocol=%s",
2100 (unsigned long)ntohl(x->id.spi),
2101 (unsigned long)ntohl(x->id.spi),
2102 x->id.proto == IPPROTO_AH ? "AH" :
2103 (x->id.proto == IPPROTO_ESP ?
2104 "ESP" : "IPCOMP"));
2105
2106 audit_log_format(audit_buf, " res=%u", result);
2107 audit_log_end(audit_buf);
2108 }
2109
2110 EXPORT_SYMBOL(xfrm_audit_log);
2111 #endif /* CONFIG_AUDITSYSCALL */
2112
2113 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2114 {
2115 int err = 0;
2116 if (unlikely(afinfo == NULL))
2117 return -EINVAL;
2118 if (unlikely(afinfo->family >= NPROTO))
2119 return -EAFNOSUPPORT;
2120 write_lock_bh(&xfrm_policy_afinfo_lock);
2121 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2122 err = -ENOBUFS;
2123 else {
2124 struct dst_ops *dst_ops = afinfo->dst_ops;
2125 if (likely(dst_ops->kmem_cachep == NULL))
2126 dst_ops->kmem_cachep = xfrm_dst_cache;
2127 if (likely(dst_ops->check == NULL))
2128 dst_ops->check = xfrm_dst_check;
2129 if (likely(dst_ops->negative_advice == NULL))
2130 dst_ops->negative_advice = xfrm_negative_advice;
2131 if (likely(dst_ops->link_failure == NULL))
2132 dst_ops->link_failure = xfrm_link_failure;
2133 if (likely(afinfo->garbage_collect == NULL))
2134 afinfo->garbage_collect = __xfrm_garbage_collect;
2135 xfrm_policy_afinfo[afinfo->family] = afinfo;
2136 }
2137 write_unlock_bh(&xfrm_policy_afinfo_lock);
2138 return err;
2139 }
2140 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2141
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

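/*
 * Look up the afinfo for a family.  On success the read lock is left
 * held and must be dropped via xfrm_policy_put_afinfo(); on failure the
 * lock is released before returning NULL.
 */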
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

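/*
 * Write-locked variant of the lookup above, for callers that modify the
 * afinfo (e.g. its type map).  Paired with xfrm_policy_unlock_afinfo().
 */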
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_policy_afinfo_lock);
}

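/*
 * Netdevice notifier: when an interface goes down, cached bundles may
 * still hold routes through it, so flush them all.
 */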
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

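/*
 * Boot-time initialization: create the xfrm_dst slab, then allocate the
 * by-index hash plus one by-destination hash and one inexact list per
 * policy direction.  The hashes start at eight buckets (hmask = 8 - 1);
 * using xfrm_hash_alloc() keeps them resizable later.
 */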
static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL, NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}

#ifdef CONFIG_XFRM_MIGRATE
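/*
 * Compare a migration selector against a policy's selector.  With
 * IPSEC_ULPROTO_ANY only the addresses, prefix lengths and family are
 * checked; otherwise the selectors must match exactly.
 */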
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

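/*
 * Find the policy a migration request applies to: first search the exact
 * by-destination hash chain, then the inexact list, where a hit only
 * wins if its priority is numerically lower (i.e. stronger) than the
 * exact match's.  The result is returned with a reference held.
 */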
static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

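/*
 * Does a policy template match one entry of a migration request?  Mode,
 * protocol and (if the request sets one) reqid must agree; tunnel and
 * BEET templates must also match on both endpoint addresses.
 */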
static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template stores no IP
			 * addresses, so comparing mode and protocol is
			 * enough. */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

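/*
 * Sanity-check a migration request: the entry count must be in range,
 * every entry must actually change an address, neither new endpoint may
 * be the wildcard address, and no two entries may describe the same
 * (addresses, proto, mode, reqid, family) tuple.
 */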
static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

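/*
 * Perform a migration in five stages: validate the request, find and
 * hold the matching policy, clone each matching state with the new
 * endpoints, rewrite the policy templates, then drop and delete the old
 * states and announce the change via km_migrate().  If cloning or the
 * policy update fails, any states created so far are torn down again.
 */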
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif /* CONFIG_XFRM_MIGRATE */