/*
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"
int sysctl_xfrm_larval_drop __read_mostly = 1;

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}
int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}
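/*
 * Selector matching, as implemented above: addresses are compared under
 * their prefix lengths, ports are compared under the selector's port
 * masks, and a zero sel->proto or sel->ifindex acts as a wildcard for
 * the protocol or the outgoing interface respectively.
 */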
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  xfrm_address_t *saddr,
						  xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		/* ... substitute the state's local care-of address ... */
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		/* ... substitute the state's remote care-of address ... */
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->walk.dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}
static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}
static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->walk.dead;
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}
static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h;

	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_add_head(&pol->bydst, ndsttable+h);
		} else {
			/* ... keep relative order within the new chain ... */
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		/* ... rescan entries still left on the old chain ... */
	}
}
static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;
	unsigned int h;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
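/*
 * Growing a table doubles the bucket count: for an old mask of
 * 2^n - 1 the new mask is 2^(n+1) - 1, e.g. 7 -> 15, so the tables
 * always keep a power-of-two number of buckets.
 */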
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}
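/*
 * Resize policy: a hash table is considered for growth once the number
 * of policies in it exceeds the current bucket count, but never beyond
 * xfrm_policy_hashmax buckets.
 */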
void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = init_net.xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;

		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
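/*
 * A generated policy index carries the direction in its low bits
 * (idx = idx_generator | dir), which is why xfrm_policy_id2dir() can
 * later recover the direction from the index alone.
 */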
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !delpol) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		list_del(&delpol->walk.all);
		net->xfrm.policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
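/*
 * Insertion keeps each hash chain ordered by policy priority and reuses
 * the index of a policy it replaces; cached bundles hanging off the
 * policies that follow the new entry on the chain are collected and
 * freed, since the new policy may now shadow them.
 */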
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				list_del(&pol->walk.all);
				net->xfrm.policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
				     int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				list_del(&pol->walk.all);
				net->xfrm.policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return 0;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				list_del(&pol->walk.all);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		net->xfrm.policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->secid,
						  dir);

	return ret;
}
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			ret = ERR_PTR(err);
			goto fail;
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			ret = ERR_PTR(err);
			goto fail;
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
			      u8 dir, void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol->security,
						      fl->secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol)
		xfrm_policy_kill(old_pol);

	return 0;
}
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}
static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(net, &tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}
		else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}
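/*
 * Template resolution walks every template of every policy involved
 * (a SUB policy may be stacked on top of the MAIN policy), looks up an
 * xfrm_state for each, and fails the whole bundle when a non-optional
 * template cannot be resolved; XFRM_MAX_DEPTH bounds the total number
 * of states a single flow may be wrapped in.
 */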
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}
static inline int xfrm_get_tos(struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static inline struct xfrm_dst *xfrm_alloc_dst(int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    struct flowi *fl,
					    struct dst_entry *dst)
{
	unsigned long now = jiffies;
	struct net_device *dev;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->genid = xfrm[i]->genid;

		dst1->obsolete = -1;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = xfrm[i]->outer_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	/* Copy neighbour for reachability confirmation */
	dst0->neighbour = neigh_clone(dst->neighbour);

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}
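/*
 * The bundle built above is a chain of xfrm_dst entries, one per state,
 * linked through ->child down to the original route: dst0 (outermost
 * transform) -> ... -> dst (plain route). Header and trailer space is
 * accumulated while building and then distributed back down, so each
 * level knows how much room the transforms below it still need.
 */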
static inline int
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static inline int
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int stale_bundle(struct dst_entry *dst);
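/*
 * __xfrm_lookup() below is roughly: find the governing policy (socket
 * policy first, then the flow cache), try to reuse a cached bundle,
 * otherwise resolve the policy's templates into states and build a new
 * bundle, retrying if the policy set changed while we were resolving.
 */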
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
		  struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = PTR_ERR(policy);
		if (IS_ERR(policy)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
			goto dropdst;
		}
	}

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		policy = flow_cache_lookup(net, fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		err = PTR_ERR(policy);
		if (IS_ERR(policy)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
			goto dropdst;
		}
	}

	if (!policy)
		goto nopol;

	family = dst_orig->ops->family;
	pols[0] = policy;
	npols++;
	xfrm_nr += pols[0]->xfrm_nr;

	err = -ENOENT;
	if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
		goto error;

	policy->curlft.use_time = get_seconds();

	switch (policy->action) {
	default:
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(net,
							    XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
					err = -EPERM;
					goto error;
				}
				npols++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Because neither flowi nor bundle information knows about
		 * transformation template size. On more than one policy usage
		 * we can realize whether all of them is bypass or not after
		 * they are searched. See above not-transformed bypass
		 * is surrounded by non-sub policy configuration, too.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}
#endif

		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&net->xfrm.km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&net->xfrm.km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
				goto error;
			}
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
		err = PTR_ERR(dst);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->walk.dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stable bundles either.
			 */
			write_unlock_bh(&policy->lock);
			dst_free(dst);

			if (pol_dead)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD);
			else
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			dst_free(dst);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;

nopol:
	err = -ENOENT;
	if (flags & XFRM_LOOKUP_ICMP)
		goto dropdst;
	return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);
int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(net, dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
/*
 * 0 or more than 0 is returned when validation succeeded (either bypass
 * because of optional transport mode, or next index of the matched secpath
 * state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		/* XXX: we should have something like FWDHDRERROR here. */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	return xfrm_lookup(net, &skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use. We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them. It is just too much work.
	 * Instead we make the checks here on every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer. If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
*pol
, int (*func
)(struct dst_entry
*), struct dst_entry
**gc_list_p
)
2121 struct dst_entry
*dst
, **dstp
;
2123 write_lock(&pol
->lock
);
2124 dstp
= &pol
->bundles
;
2125 while ((dst
=*dstp
) != NULL
) {
2128 dst
->next
= *gc_list_p
;
2134 write_unlock(&pol
->lock
);
2137 static void xfrm_prune_bundles(struct net
*net
, int (*func
)(struct dst_entry
*))
2139 struct dst_entry
*gc_list
= NULL
;
2142 read_lock_bh(&xfrm_policy_lock
);
2143 for (dir
= 0; dir
< XFRM_POLICY_MAX
* 2; dir
++) {
2144 struct xfrm_policy
*pol
;
2145 struct hlist_node
*entry
;
2146 struct hlist_head
*table
;
2149 hlist_for_each_entry(pol
, entry
,
2150 &net
->xfrm
.policy_inexact
[dir
], bydst
)
2151 prune_one_bundle(pol
, func
, &gc_list
);
2153 table
= net
->xfrm
.policy_bydst
[dir
].table
;
2154 for (i
= net
->xfrm
.policy_bydst
[dir
].hmask
; i
>= 0; i
--) {
2155 hlist_for_each_entry(pol
, entry
, table
+ i
, bydst
)
2156 prune_one_bundle(pol
, func
, &gc_list
);
2159 read_unlock_bh(&xfrm_policy_lock
);
2162 struct dst_entry
*dst
= gc_list
;
2163 gc_list
= dst
->next
;
2168 static int unused_bundle(struct dst_entry
*dst
)
2170 return !atomic_read(&dst
->__refcnt
);
2173 static void __xfrm_garbage_collect(struct net
*net
)
2175 xfrm_prune_bundles(net
, unused_bundle
);
2178 static int xfrm_flush_bundles(struct net
*net
)
2180 xfrm_prune_bundles(net
, stale_bundle
);
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}
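/*
 * Each level of a bundle caches two MTUs: that of its child (the inner
 * dst) and that of the route it rides on. The effective MTU written to
 * the metrics is the child MTU reduced by the state's overhead and
 * clamped to the route MTU, so a later change in either one can be
 * detected by xfrm_bundle_ok().
 */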
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
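/*
 * Address-family specific policy operations live in the
 * xfrm_policy_afinfo[] table, indexed by family and protected by
 * xfrm_policy_afinfo_lock; registration also plugs the shared xfrm dst
 * cache and dst_ops callbacks into the family's dst_ops unless the
 * family already provides its own.
 */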
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	if (snmp_mib_init((void **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
	snmp_mib_free((void **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
	flush_work(&xfrm_policy_gc_work);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	return 0;

out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
#ifdef CONFIG_XFRM_STATISTICS
	/* ... */
#endif
}
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch(sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif /* CONFIG_AUDITSYSCALL */
2599 static int xfrm_migrate_selector_match(struct xfrm_selector
*sel_cmp
,
2600 struct xfrm_selector
*sel_tgt
)
2602 if (sel_cmp
->proto
== IPSEC_ULPROTO_ANY
) {
2603 if (sel_tgt
->family
== sel_cmp
->family
&&
2604 xfrm_addr_cmp(&sel_tgt
->daddr
, &sel_cmp
->daddr
,
2605 sel_cmp
->family
) == 0 &&
2606 xfrm_addr_cmp(&sel_tgt
->saddr
, &sel_cmp
->saddr
,
2607 sel_cmp
->family
) == 0 &&
2608 sel_tgt
->prefixlen_d
== sel_cmp
->prefixlen_d
&&
2609 sel_tgt
->prefixlen_s
== sel_cmp
->prefixlen_s
) {
2613 if (memcmp(sel_tgt
, sel_cmp
, sizeof(*sel_tgt
)) == 0) {
static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
*m
, int num_migrate
)
2734 if (num_migrate
< 1 || num_migrate
> XFRM_MAX_DEPTH
)
2737 for (i
= 0; i
< num_migrate
; i
++) {
2738 if ((xfrm_addr_cmp(&m
[i
].old_daddr
, &m
[i
].new_daddr
,
2739 m
[i
].old_family
) == 0) &&
2740 (xfrm_addr_cmp(&m
[i
].old_saddr
, &m
[i
].new_saddr
,
2741 m
[i
].old_family
) == 0))
2743 if (xfrm_addr_any(&m
[i
].new_daddr
, m
[i
].new_family
) ||
2744 xfrm_addr_any(&m
[i
].new_saddr
, m
[i
].new_family
))
2747 /* check if there is any duplicated entry */
2748 for (j
= i
+ 1; j
< num_migrate
; j
++) {
2749 if (!memcmp(&m
[i
].old_daddr
, &m
[j
].old_daddr
,
2750 sizeof(m
[i
].old_daddr
)) &&
2751 !memcmp(&m
[i
].old_saddr
, &m
[j
].old_saddr
,
2752 sizeof(m
[i
].old_saddr
)) &&
2753 m
[i
].proto
== m
[j
].proto
&&
2754 m
[i
].mode
== m
[j
].mode
&&
2755 m
[i
].reqid
== m
[j
].reqid
&&
2756 m
[i
].old_family
== m
[j
].old_family
)
2764 int xfrm_migrate(struct xfrm_selector
*sel
, u8 dir
, u8 type
,
2765 struct xfrm_migrate
*m
, int num_migrate
,
2766 struct xfrm_kmaddress
*k
)
2768 int i
, err
, nx_cur
= 0, nx_new
= 0;
2769 struct xfrm_policy
*pol
= NULL
;
2770 struct xfrm_state
*x
, *xc
;
2771 struct xfrm_state
*x_cur
[XFRM_MAX_DEPTH
];
2772 struct xfrm_state
*x_new
[XFRM_MAX_DEPTH
];
2773 struct xfrm_migrate
*mp
;
2775 if ((err
= xfrm_migrate_check(m
, num_migrate
)) < 0)
2778 /* Stage 1 - find policy */
2779 if ((pol
= xfrm_migrate_policy_find(sel
, dir
, type
)) == NULL
) {
2784 /* Stage 2 - find and update state(s) */
2785 for (i
= 0, mp
= m
; i
< num_migrate
; i
++, mp
++) {
2786 if ((x
= xfrm_migrate_state_find(mp
))) {
2789 if ((xc
= xfrm_state_migrate(x
, mp
))) {
2799 /* Stage 3 - update policy */
2800 if ((err
= xfrm_policy_migrate(pol
, m
, num_migrate
)) < 0)
2803 /* Stage 4 - delete old state(s) */
2805 xfrm_states_put(x_cur
, nx_cur
);
2806 xfrm_states_delete(x_cur
, nx_cur
);
2809 /* Stage 5 - announce */
2810 km_migrate(sel
, dir
, type
, m
, num_migrate
, k
);
2822 xfrm_states_put(x_cur
, nx_cur
);
2824 xfrm_states_delete(x_new
, nx_new
);
2828 EXPORT_SYMBOL(xfrm_migrate
);