6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * Kazunori MIYAZAWA @USAGI
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/audit.h>
32 #ifdef CONFIG_XFRM_STATISTICS
36 #include "xfrm_hash.h"
38 DEFINE_MUTEX(xfrm_cfg_mutex);
39 EXPORT_SYMBOL(xfrm_cfg_mutex);
41 static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
42 static struct dst_entry *xfrm_policy_sk_bundles;
43 static DEFINE_RWLOCK(xfrm_policy_lock);
45 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
46 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
48 static struct kmem_cache *xfrm_dst_cache __read_mostly;
50 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
51 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
52 static void xfrm_init_pmtu(struct dst_entry *dst);
53 static int stale_bundle(struct dst_entry *dst);
54 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
57 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
61 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
63 	const struct flowi4 *fl4 = &fl->u.ip4;
65 	return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
66 		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
67 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
68 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
69 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
70 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
74 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
76 	const struct flowi6 *fl6 = &fl->u.ip6;
78 	return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
79 		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
80 		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
81 		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
82 		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
83 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
86 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
87 			 unsigned short family)
91 		return __xfrm4_selector_match(sel, fl);
93 		return __xfrm6_selector_match(sel, fl);
98 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
99 						  const xfrm_address_t *saddr,
100 						  const xfrm_address_t *daddr,
103 	struct xfrm_policy_afinfo *afinfo;
104 	struct dst_entry *dst;
106 	afinfo = xfrm_policy_get_afinfo(family);
107 	if (unlikely(afinfo == NULL))
108 		return ERR_PTR(-EAFNOSUPPORT);
110 	dst = afinfo->dst_lookup(net, tos, saddr, daddr);
112 	xfrm_policy_put_afinfo(afinfo);
117 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
118 						xfrm_address_t *prev_saddr,
119 						xfrm_address_t *prev_daddr,
122 	struct net *net = xs_net(x);
123 	xfrm_address_t *saddr = &x->props.saddr;
124 	xfrm_address_t *daddr = &x->id.daddr;
125 	struct dst_entry *dst;
127 	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
131 	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
136 	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
139 		if (prev_saddr != saddr)
140 			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
141 		if (prev_daddr != daddr)
142 			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
148 static inline unsigned long make_jiffies(long secs)
150 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
151 		return MAX_SCHEDULE_TIMEOUT-1;
156 static void xfrm_policy_timer(unsigned long data)
158 	struct xfrm_policy *xp = (struct xfrm_policy *)data;
159 	unsigned long now = get_seconds();
160 	long next = LONG_MAX;
164 	read_lock(&xp->lock);
166 	if (unlikely(xp->walk.dead))
169 	dir = xfrm_policy_id2dir(xp->index);
171 	if (xp->lft.hard_add_expires_seconds) {
172 		long tmo = xp->lft.hard_add_expires_seconds +
173 			xp->curlft.add_time - now;
179 	if (xp->lft.hard_use_expires_seconds) {
180 		long tmo = xp->lft.hard_use_expires_seconds +
181 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
187 	if (xp->lft.soft_add_expires_seconds) {
188 		long tmo = xp->lft.soft_add_expires_seconds +
189 			xp->curlft.add_time - now;
192 			tmo = XFRM_KM_TIMEOUT;
197 	if (xp->lft.soft_use_expires_seconds) {
198 		long tmo = xp->lft.soft_use_expires_seconds +
199 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
202 			tmo = XFRM_KM_TIMEOUT;
209 		km_policy_expired(xp, dir, 0, 0);
210 	if (next != LONG_MAX &&
211 	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
215 	read_unlock(&xp->lock);
220 	read_unlock(&xp->lock);
221 	if (!xfrm_policy_delete(xp, dir))
222 		km_policy_expired(xp, dir, 1, 0);
226 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
228 	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
230 	if (unlikely(pol->walk.dead))
238 static int xfrm_policy_flo_check(struct flow_cache_object *flo)
240 	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
242 	return !pol->walk.dead;
245 static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
247 	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
250 static const struct flow_cache_ops xfrm_policy_fc_ops = {
251 	.get = xfrm_policy_flo_get,
252 	.check = xfrm_policy_flo_check,
253 	.delete = xfrm_policy_flo_delete,
256 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
260 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
262 	struct xfrm_policy *policy;
264 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
267 		write_pnet(&policy->xp_net, net);
268 		INIT_LIST_HEAD(&policy->walk.all);
269 		INIT_HLIST_NODE(&policy->bydst);
270 		INIT_HLIST_NODE(&policy->byidx);
271 		rwlock_init(&policy->lock);
272 		atomic_set(&policy->refcnt, 1);
273 		setup_timer(&policy->timer, xfrm_policy_timer,
274 				(unsigned long)policy);
275 		policy->flo.ops = &xfrm_policy_fc_ops;
279 EXPORT_SYMBOL(xfrm_policy_alloc);
281 /* Destroy xfrm_policy: descendant resources must be released to this moment. */
283 void xfrm_policy_destroy(struct xfrm_policy *policy)
285 	BUG_ON(!policy->walk.dead);
287 	if (del_timer(&policy->timer))
290 	security_xfrm_policy_free(policy->security);
293 EXPORT_SYMBOL(xfrm_policy_destroy);
295 /* Rule must be locked. Release descendant resources, announce
296 * entry dead. The rule must be unlinked from lists by this moment.
299 static void xfrm_policy_kill(struct xfrm_policy *policy)
301 	policy->walk.dead = 1;
303 	atomic_inc(&policy->genid);
305 	if (del_timer(&policy->timer))
306 		xfrm_pol_put(policy);
308 	xfrm_pol_put(policy);
311 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
313 static inline unsigned int idx_hash(struct net *net, u32 index)
315 	return __idx_hash(index, net->xfrm.policy_idx_hmask);
318 static struct hlist_head *policy_hash_bysel(struct net *net,
319 					    const struct xfrm_selector *sel,
320 					    unsigned short family, int dir)
322 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
323 	unsigned int hash = __sel_hash(sel, family, hmask);
325 	return (hash == hmask + 1 ?
326 		&net->xfrm.policy_inexact[dir] :
327 		net->xfrm.policy_bydst[dir].table + hash);
330 static struct hlist_head *policy_hash_direct(struct net *net,
331 					     const xfrm_address_t *daddr,
332 					     const xfrm_address_t *saddr,
333 					     unsigned short family, int dir)
335 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
336 	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
338 	return net->xfrm.policy_bydst[dir].table + hash;
341 static void xfrm_dst_hash_transfer(struct hlist_head *list,
342 				   struct hlist_head *ndsttable,
343 				   unsigned int nhashmask)
345 	struct hlist_node *entry, *tmp, *entry0 = NULL;
346 	struct xfrm_policy *pol;
350 	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
353 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
354 				pol->family, nhashmask);
357 			hlist_add_head(&pol->bydst, ndsttable+h);
363 			hlist_add_after(entry0, &pol->bydst);
367 	if (!hlist_empty(list)) {
373 static void xfrm_idx_hash_transfer(struct hlist_head *list,
374 				   struct hlist_head *nidxtable,
375 				   unsigned int nhashmask)
377 	struct hlist_node *entry, *tmp;
378 	struct xfrm_policy *pol;
380 	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
383 		h = __idx_hash(pol->index, nhashmask);
384 		hlist_add_head(&pol->byidx, nidxtable+h);
388 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
390 	return ((old_hmask + 1) << 1) - 1;
393 static void xfrm_bydst_resize(struct net *net, int dir)
395 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
396 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
397 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
398 	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
399 	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
405 	write_lock_bh(&xfrm_policy_lock);
407 	for (i = hmask; i >= 0; i--)
408 		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
410 	net->xfrm.policy_bydst[dir].table = ndst;
411 	net->xfrm.policy_bydst[dir].hmask = nhashmask;
413 	write_unlock_bh(&xfrm_policy_lock);
415 	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
418 static void xfrm_byidx_resize(struct net *net, int total)
420 	unsigned int hmask = net->xfrm.policy_idx_hmask;
421 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
422 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
423 	struct hlist_head *oidx = net->xfrm.policy_byidx;
424 	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
430 	write_lock_bh(&xfrm_policy_lock);
432 	for (i = hmask; i >= 0; i--)
433 		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
435 	net->xfrm.policy_byidx = nidx;
436 	net->xfrm.policy_idx_hmask = nhashmask;
438 	write_unlock_bh(&xfrm_policy_lock);
440 	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
443 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
445 	unsigned int cnt = net->xfrm.policy_count[dir];
446 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
451 	if ((hmask + 1) < xfrm_policy_hashmax &&
458 static inline int xfrm_byidx_should_resize(struct net *net, int total)
460 	unsigned int hmask = net->xfrm.policy_idx_hmask;
462 	if ((hmask + 1) < xfrm_policy_hashmax &&
469 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
471 	read_lock_bh(&xfrm_policy_lock);
472 	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
473 	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
474 	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
475 	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
476 	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
477 	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
478 	si->spdhcnt = net->xfrm.policy_idx_hmask;
479 	si->spdhmcnt = xfrm_policy_hashmax;
480 	read_unlock_bh(&xfrm_policy_lock);
482 EXPORT_SYMBOL(xfrm_spd_getinfo);
484 static DEFINE_MUTEX(hash_resize_mutex);
485 static void xfrm_hash_resize(struct work_struct *work)
487 	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
490 	mutex_lock(&hash_resize_mutex);
493 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
494 		if (xfrm_bydst_should_resize(net, dir, &total))
495 			xfrm_bydst_resize(net, dir);
497 	if (xfrm_byidx_should_resize(net, total))
498 		xfrm_byidx_resize(net, total);
500 	mutex_unlock(&hash_resize_mutex);
503 /* Generate new index... KAME seems to generate them ordered by cost
504 * of an absolute unpredictability of ordering of rules. This will not pass. */
505 static u32 xfrm_gen_index(struct net *net, int dir)
507 	static u32 idx_generator;
510 		struct hlist_node *entry;
511 		struct hlist_head *list;
512 		struct xfrm_policy *p;
516 		idx = (idx_generator | dir);
520 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
522 		hlist_for_each_entry(p, entry, list, byidx) {
523 			if (p->index == idx) {
533 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
535 	u32 *p1 = (u32 *) s1;
536 	u32 *p2 = (u32 *) s2;
537 	int len = sizeof(struct xfrm_selector) / sizeof(u32);
540 	for (i = 0; i < len; i++) {
548 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
550 	struct net *net = xp_net(policy);
551 	struct xfrm_policy *pol;
552 	struct xfrm_policy *delpol;
553 	struct hlist_head *chain;
554 	struct hlist_node *entry, *newpos;
555 	u32 mark = policy->mark.v & policy->mark.m;
557 	write_lock_bh(&xfrm_policy_lock);
558 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
561 	hlist_for_each_entry(pol, entry, chain, bydst) {
562 		if (pol->type == policy->type &&
563 		    !selector_cmp(&pol->selector, &policy->selector) &&
564 		    (mark & pol->mark.m) == pol->mark.v &&
565 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
568 				write_unlock_bh(&xfrm_policy_lock);
572 			if (policy->priority > pol->priority)
574 		} else if (policy->priority >= pol->priority) {
575 			newpos = &pol->bydst;
582 		hlist_add_after(newpos, &policy->bydst);
584 		hlist_add_head(&policy->bydst, chain);
585 	xfrm_pol_hold(policy);
586 	net->xfrm.policy_count[dir]++;
587 	atomic_inc(&flow_cache_genid);
590 		__xfrm_policy_unlink(delpol, dir);
591 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
592 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
593 	policy->curlft.add_time = get_seconds();
594 	policy->curlft.use_time = 0;
595 	if (!mod_timer(&policy->timer, jiffies + HZ))
596 		xfrm_pol_hold(policy);
597 	list_add(&policy->walk.all, &net->xfrm.policy_all);
598 	write_unlock_bh(&xfrm_policy_lock);
601 		xfrm_policy_kill(delpol);
602 	else if (xfrm_bydst_should_resize(net, dir, NULL))
603 		schedule_work(&net->xfrm.policy_hash_work);
607 EXPORT_SYMBOL(xfrm_policy_insert);
609 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
610 					  int dir, struct xfrm_selector *sel,
611 					  struct xfrm_sec_ctx *ctx, int delete,
614 	struct xfrm_policy *pol, *ret;
615 	struct hlist_head *chain;
616 	struct hlist_node *entry;
619 	write_lock_bh(&xfrm_policy_lock);
620 	chain = policy_hash_bysel(net, sel, sel->family, dir);
622 	hlist_for_each_entry(pol, entry, chain, bydst) {
623 		if (pol->type == type &&
624 		    (mark & pol->mark.m) == pol->mark.v &&
625 		    !selector_cmp(sel, &pol->selector) &&
626 		    xfrm_sec_ctx_match(ctx, pol->security)) {
629 				*err = security_xfrm_policy_delete(
632 					write_unlock_bh(&xfrm_policy_lock);
635 				__xfrm_policy_unlink(pol, dir);
641 	write_unlock_bh(&xfrm_policy_lock);
644 		xfrm_policy_kill(ret);
647 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
649 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
650 				     int dir, u32 id, int delete, int *err)
652 	struct xfrm_policy *pol, *ret;
653 	struct hlist_head *chain;
654 	struct hlist_node *entry;
657 	if (xfrm_policy_id2dir(id) != dir)
661 	write_lock_bh(&xfrm_policy_lock);
662 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
664 	hlist_for_each_entry(pol, entry, chain, byidx) {
665 		if (pol->type == type && pol->index == id &&
666 		    (mark & pol->mark.m) == pol->mark.v) {
669 				*err = security_xfrm_policy_delete(
672 					write_unlock_bh(&xfrm_policy_lock);
675 				__xfrm_policy_unlink(pol, dir);
681 	write_unlock_bh(&xfrm_policy_lock);
684 		xfrm_policy_kill(ret);
687 EXPORT_SYMBOL(xfrm_policy_byid);
689 #ifdef CONFIG_SECURITY_NETWORK_XFRM
691 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
695 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
696 		struct xfrm_policy *pol;
697 		struct hlist_node *entry;
700 		hlist_for_each_entry(pol, entry,
701 				     &net->xfrm.policy_inexact[dir], bydst) {
702 			if (pol->type != type)
704 			err = security_xfrm_policy_delete(pol->security);
706 				xfrm_audit_policy_delete(pol, 0,
707 							 audit_info->loginuid,
708 							 audit_info->sessionid,
713 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
714 			hlist_for_each_entry(pol, entry,
715 					     net->xfrm.policy_bydst[dir].table + i,
717 				if (pol->type != type)
719 				err = security_xfrm_policy_delete(
722 					xfrm_audit_policy_delete(pol, 0,
723 								 audit_info->loginuid,
724 								 audit_info->sessionid,
735 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
741 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
743 	int dir, err = 0, cnt = 0;
745 	write_lock_bh(&xfrm_policy_lock);
747 	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
751 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
752 		struct xfrm_policy *pol;
753 		struct hlist_node *entry;
757 		hlist_for_each_entry(pol, entry,
758 				     &net->xfrm.policy_inexact[dir], bydst) {
759 			if (pol->type != type)
761 			__xfrm_policy_unlink(pol, dir);
762 			write_unlock_bh(&xfrm_policy_lock);
765 			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
766 						 audit_info->sessionid,
769 			xfrm_policy_kill(pol);
771 			write_lock_bh(&xfrm_policy_lock);
775 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
777 			hlist_for_each_entry(pol, entry,
778 					     net->xfrm.policy_bydst[dir].table + i,
780 				if (pol->type != type)
782 				__xfrm_policy_unlink(pol, dir);
783 				write_unlock_bh(&xfrm_policy_lock);
786 				xfrm_audit_policy_delete(pol, 1,
787 							 audit_info->loginuid,
788 							 audit_info->sessionid,
790 				xfrm_policy_kill(pol);
792 				write_lock_bh(&xfrm_policy_lock);
801 	write_unlock_bh(&xfrm_policy_lock);
804 EXPORT_SYMBOL(xfrm_policy_flush);
806 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
807 		     int (*func)(struct xfrm_policy *, int, int, void*),
810 	struct xfrm_policy *pol;
811 	struct xfrm_policy_walk_entry *x;
814 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
815 	    walk->type != XFRM_POLICY_TYPE_ANY)
818 	if (list_empty(&walk->walk.all) && walk->seq != 0)
821 	write_lock_bh(&xfrm_policy_lock);
822 	if (list_empty(&walk->walk.all))
823 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
825 		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
826 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
829 		pol = container_of(x, struct xfrm_policy, walk);
830 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
831 		    walk->type != pol->type)
833 		error = func(pol, xfrm_policy_id2dir(pol->index),
836 			list_move_tail(&walk->walk.all, &x->all);
841 	if (walk->seq == 0) {
845 	list_del_init(&walk->walk.all);
847 	write_unlock_bh(&xfrm_policy_lock);
850 EXPORT_SYMBOL(xfrm_policy_walk);
852 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
854 	INIT_LIST_HEAD(&walk->walk.all);
859 EXPORT_SYMBOL(xfrm_policy_walk_init);
861 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
863 	if (list_empty(&walk->walk.all))
866 	write_lock_bh(&xfrm_policy_lock);
867 	list_del(&walk->walk.all);
868 	write_unlock_bh(&xfrm_policy_lock);
870 EXPORT_SYMBOL(xfrm_policy_walk_done);
873 * Find policy to apply to this flow.
875 * Returns 0 if policy found, else a negative errno.
877 static int xfrm_policy_match(const struct xfrm_policy *pol,
878 			     const struct flowi *fl,
879 			     u8 type, u16 family, int dir)
881 	const struct xfrm_selector *sel = &pol->selector;
885 	if (pol->family != family ||
886 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
890 	match = xfrm_selector_match(sel, fl, family);
892 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
898 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
899 						     const struct flowi *fl,
903 	struct xfrm_policy *pol, *ret;
904 	const xfrm_address_t *daddr, *saddr;
905 	struct hlist_node *entry;
906 	struct hlist_head *chain;
909 	daddr = xfrm_flowi_daddr(fl, family);
910 	saddr = xfrm_flowi_saddr(fl, family);
911 	if (unlikely(!daddr || !saddr))
914 	read_lock_bh(&xfrm_policy_lock);
915 	chain = policy_hash_direct(net, daddr, saddr, family, dir);
917 	hlist_for_each_entry(pol, entry, chain, bydst) {
918 		err = xfrm_policy_match(pol, fl, type, family, dir);
928 			priority = ret->priority;
932 	chain = &net->xfrm.policy_inexact[dir];
933 	hlist_for_each_entry(pol, entry, chain, bydst) {
934 		err = xfrm_policy_match(pol, fl, type, family, dir);
942 		} else if (pol->priority < priority) {
950 	read_unlock_bh(&xfrm_policy_lock);
955 static struct xfrm_policy *
956 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
958 #ifdef CONFIG_XFRM_SUB_POLICY
959 	struct xfrm_policy *pol;
961 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
965 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
968 static struct flow_cache_object *
969 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
970 		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
972 	struct xfrm_policy *pol;
975 		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
977 	pol = __xfrm_policy_lookup(net, fl, family, dir);
978 	if (IS_ERR_OR_NULL(pol))
979 		return ERR_CAST(pol);
981 	/* Resolver returns two references:
982 	 * one for cache and one for caller of flow_cache_lookup() */
988 static inline int policy_to_flow_dir(int dir)
990 	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
991 	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
992 	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
998 	case XFRM_POLICY_OUT:
1000 	case XFRM_POLICY_FWD:
1001 		return FLOW_DIR_FWD;
1005 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1006 						 const struct flowi *fl)
1008 	struct xfrm_policy *pol;
1010 	read_lock_bh(&xfrm_policy_lock);
1011 	if ((pol = sk->sk_policy[dir]) != NULL) {
1012 		bool match = xfrm_selector_match(&pol->selector, fl,
1017 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1021 			err = security_xfrm_policy_lookup(pol->security,
1023 						      policy_to_flow_dir(dir));
1026 			else if (err == -ESRCH)
1034 	read_unlock_bh(&xfrm_policy_lock);
1038 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1040 	struct net *net = xp_net(pol);
1041 	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1044 	list_add(&pol->walk.all, &net->xfrm.policy_all);
1045 	hlist_add_head(&pol->bydst, chain);
1046 	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1047 	net->xfrm.policy_count[dir]++;
1050 	if (xfrm_bydst_should_resize(net, dir, NULL))
1051 		schedule_work(&net->xfrm.policy_hash_work);
1054 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1057 	struct net *net = xp_net(pol);
1059 	if (hlist_unhashed(&pol->bydst))
1062 	hlist_del(&pol->bydst);
1063 	hlist_del(&pol->byidx);
1064 	list_del(&pol->walk.all);
1065 	net->xfrm.policy_count[dir]--;
1070 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1072 	write_lock_bh(&xfrm_policy_lock);
1073 	pol = __xfrm_policy_unlink(pol, dir);
1074 	write_unlock_bh(&xfrm_policy_lock);
1076 		xfrm_policy_kill(pol);
1081 EXPORT_SYMBOL(xfrm_policy_delete);
1083 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1085 	struct net *net = xp_net(pol);
1086 	struct xfrm_policy *old_pol;
1088 #ifdef CONFIG_XFRM_SUB_POLICY
1089 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1093 	write_lock_bh(&xfrm_policy_lock);
1094 	old_pol = sk->sk_policy[dir];
1095 	sk->sk_policy[dir] = pol;
1097 		pol->curlft.add_time = get_seconds();
1098 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1099 		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1102 		/* Unlinking succeeds always. This is the only function
1103 		 * allowed to delete or replace socket policy.
1105 		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1106 	write_unlock_bh(&xfrm_policy_lock);
1109 		xfrm_policy_kill(old_pol);
1114 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1116 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1119 		newp->selector = old->selector;
1120 		if (security_xfrm_policy_clone(old->security,
1123 			return NULL; /* ENOMEM */
1125 		newp->lft = old->lft;
1126 		newp->curlft = old->curlft;
1127 		newp->mark = old->mark;
1128 		newp->action = old->action;
1129 		newp->flags = old->flags;
1130 		newp->xfrm_nr = old->xfrm_nr;
1131 		newp->index = old->index;
1132 		newp->type = old->type;
1133 		memcpy(newp->xfrm_vec, old->xfrm_vec,
1134 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1135 		write_lock_bh(&xfrm_policy_lock);
1136 		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1137 		write_unlock_bh(&xfrm_policy_lock);
1143 int __xfrm_sk_clone_policy(struct sock *sk)
1145 	struct xfrm_policy *p0 = sk->sk_policy[0],
1146 			   *p1 = sk->sk_policy[1];
1148 	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1149 	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1151 	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1157 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1158 	       unsigned short family)
1161 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1163 	if (unlikely(afinfo == NULL))
1165 	err = afinfo->get_saddr(net, local, remote);
1166 	xfrm_policy_put_afinfo(afinfo);
1170 /* Resolve list of templates for the flow, given policy. */
1173 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1174 		      struct xfrm_state **xfrm, unsigned short family)
1176 	struct net *net = xp_net(policy);
1179 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1180 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1183 	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
1184 		struct xfrm_state *x;
1185 		xfrm_address_t *remote = daddr;
1186 		xfrm_address_t *local = saddr;
1187 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1189 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1190 		    tmpl->mode == XFRM_MODE_BEET) {
1191 			remote = &tmpl->id.daddr;
1192 			local = &tmpl->saddr;
1193 			if (xfrm_addr_any(local, tmpl->encap_family)) {
1194 				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1201 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1203 		if (x && x->km.state == XFRM_STATE_VALID) {
1210 			error = (x->km.state == XFRM_STATE_ERROR ?
1214 		else if (error == -ESRCH)
1217 		if (!tmpl->optional)
1223 	for (nx--; nx>=0; nx--)
1224 		xfrm_state_put(xfrm[nx]);
1229 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1230 		  struct xfrm_state **xfrm, unsigned short family)
1232 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1233 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1239 	for (i = 0; i < npols; i++) {
1240 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1245 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1253 	/* found states are sorted for outbound processing */
1255 		xfrm_state_sort(xfrm, tpp, cnx, family);
1260 	for (cnx--; cnx>=0; cnx--)
1261 		xfrm_state_put(tpp[cnx]);
1266 /* Check that the bundle accepts the flow and its components are
1270 static inline int xfrm_get_tos(const struct flowi *fl, int family)
1272 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1278 	tos = afinfo->get_tos(fl);
1280 	xfrm_policy_put_afinfo(afinfo);
1285 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1287 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1288 	struct dst_entry *dst = &xdst->u.dst;
1290 	if (xdst->route == NULL) {
1291 		/* Dummy bundle - if it has xfrms we were not
1292 		 * able to build bundle as template resolution failed.
1293 		 * It means we need to try again resolving. */
1294 		if (xdst->num_xfrms > 0)
1298 		if (stale_bundle(dst))
1306 static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1308 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1309 	struct dst_entry *dst = &xdst->u.dst;
1313 	if (stale_bundle(dst))
1319 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1321 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1322 	struct dst_entry *dst = &xdst->u.dst;
1327 static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1328 	.get = xfrm_bundle_flo_get,
1329 	.check = xfrm_bundle_flo_check,
1330 	.delete = xfrm_bundle_flo_delete,
1333 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1335 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1336 	struct dst_ops *dst_ops;
1337 	struct xfrm_dst *xdst;
1340 		return ERR_PTR(-EINVAL);
1344 		dst_ops = &net->xfrm.xfrm4_dst_ops;
1346 #if IS_ENABLED(CONFIG_IPV6)
1348 		dst_ops = &net->xfrm.xfrm6_dst_ops;
1354 	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1357 		struct dst_entry *dst = &xdst->u.dst;
1359 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1360 		xdst->flo.ops = &xfrm_bundle_fc_ops;
1361 		if (afinfo->init_dst)
1362 			afinfo->init_dst(net, xdst);
1364 		xdst = ERR_PTR(-ENOBUFS);
1366 	xfrm_policy_put_afinfo(afinfo);
1371 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1374 	struct xfrm_policy_afinfo *afinfo =
1375 		xfrm_policy_get_afinfo(dst->ops->family);
1381 	err = afinfo->init_path(path, dst, nfheader_len);
1383 	xfrm_policy_put_afinfo(afinfo);
1388 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1389 				const struct flowi *fl)
1391 	struct xfrm_policy_afinfo *afinfo =
1392 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1398 	err = afinfo->fill_dst(xdst, dev, fl);
1400 	xfrm_policy_put_afinfo(afinfo);
1406 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
1407 * all the metrics... Shortly, bundle a bundle.
1410 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1411 					    struct xfrm_state **xfrm, int nx,
1412 					    const struct flowi *fl,
1413 					    struct dst_entry *dst)
1415 	struct net *net = xp_net(policy);
1416 	unsigned long now = jiffies;
1417 	struct net_device *dev;
1418 	struct xfrm_mode *inner_mode;
1419 	struct dst_entry *dst_prev = NULL;
1420 	struct dst_entry *dst0 = NULL;
1424 	int nfheader_len = 0;
1425 	int trailer_len = 0;
1427 	int family = policy->selector.family;
1428 	xfrm_address_t saddr, daddr;
1430 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1432 	tos = xfrm_get_tos(fl, family);
1439 	for (; i < nx; i++) {
1440 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1441 		struct dst_entry *dst1 = &xdst->u.dst;
1443 			err = PTR_ERR(xdst);
1449 		if (xfrm[i]->sel.family == AF_UNSPEC) {
1450 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1451 							xfrm_af2proto(family));
1453 				err = -EAFNOSUPPORT;
1458 			inner_mode = xfrm[i]->inner_mode;
1463 			dst_prev->child = dst_clone(dst1);
1464 			dst1->flags |= DST_NOHASH;
1468 		dst_copy_metrics(dst1, dst);
1470 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1471 			family = xfrm[i]->props.family;
1472 			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1480 		dst1->xfrm = xfrm[i];
1481 		xdst->xfrm_genid = xfrm[i]->genid;
1483 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1484 		dst1->flags |= DST_HOST;
1485 		dst1->lastuse = now;
1487 		dst1->input = dst_discard;
1488 		dst1->output = inner_mode->afinfo->output;
1490 		dst1->next = dst_prev;
1493 		header_len += xfrm[i]->props.header_len;
1494 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1495 			nfheader_len += xfrm[i]->props.header_len;
1496 		trailer_len += xfrm[i]->props.trailer_len;
1499 	dst_prev->child = dst;
1507 	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1508 	xfrm_init_pmtu(dst_prev);
1510 	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1511 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1513 		err = xfrm_fill_dst(xdst, dev, fl);
1517 		dst_prev->header_len = header_len;
1518 		dst_prev->trailer_len = trailer_len;
1519 		header_len -= xdst->u.dst.xfrm->props.header_len;
1520 		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1528 		xfrm_state_put(xfrm[i]);
1532 	dst0 = ERR_PTR(err);
1537 xfrm_dst_alloc_copy(void **target, const void *src, int size)
1540 	*target = kmalloc(size, GFP_ATOMIC);
1544 	memcpy(*target, src, size);
1549 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
1551 #ifdef CONFIG_XFRM_SUB_POLICY
1552 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1553 	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1561 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
1563 #ifdef CONFIG_XFRM_SUB_POLICY
1564 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1565 	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
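/* xfrm_expand_policies() takes the policy already looked up for a flow (if
 * any), adds the corresponding main-type policy when a sub-policy was
 * found first (CONFIG_XFRM_SUB_POLICY), and reports via *num_pols and
 * *num_xfrms how many policies apply and how many transforms they request;
 * policy lookup errors are propagated as negative errnos.
 */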
1571 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1572 				struct xfrm_policy **pols,
1573 				int *num_pols, int *num_xfrms)
1577 	if (*num_pols == 0 || !pols[0]) {
1582 	if (IS_ERR(pols[0]))
1583 		return PTR_ERR(pols[0]);
1585 	*num_xfrms = pols[0]->xfrm_nr;
1587 #ifdef CONFIG_XFRM_SUB_POLICY
1588 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1589 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1590 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1591 						    XFRM_POLICY_TYPE_MAIN,
1595 		if (IS_ERR(pols[1])) {
1596 			xfrm_pols_put(pols, *num_pols);
1597 			return PTR_ERR(pols[1]);
1600 			(*num_xfrms) += pols[1]->xfrm_nr;
1604 	for (i = 0; i < *num_pols; i++) {
1605 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1615 static struct xfrm_dst *
1616 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1617 			       const struct flowi *fl, u16 family,
1618 			       struct dst_entry *dst_orig)
1620 	struct net *net = xp_net(pols[0]);
1621 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1622 	struct dst_entry *dst;
1623 	struct xfrm_dst *xdst;
1626 	/* Try to instantiate a bundle */
1627 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1629 		if (err != 0 && err != -EAGAIN)
1630 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1631 		return ERR_PTR(err);
1634 	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1636 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1637 		return ERR_CAST(dst);
1640 	xdst = (struct xfrm_dst *)dst;
1641 	xdst->num_xfrms = err;
1643 		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1645 		err = xfrm_dst_update_origin(dst, fl);
1646 	if (unlikely(err)) {
1648 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1649 		return ERR_PTR(err);
1652 	xdst->num_pols = num_pols;
1653 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1654 	xdst->policy_genid = atomic_read(&pols[0]->genid);
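/* xfrm_bundle_lookup() is the flow-cache resolver for output bundles: it
 * reuses a cached xfrm_dst when its policies are still alive, otherwise it
 * looks the policies up again and builds a new bundle; when policies exist
 * but template resolution fails, it installs a "dummy" bundle with no route
 * so the lookup is retried later.
 */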
1659 static struct flow_cache_object *
1660 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1661 		   struct flow_cache_object *oldflo, void *ctx)
1663 	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1664 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1665 	struct xfrm_dst *xdst, *new_xdst;
1666 	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1668 	/* Check if the policies from old bundle are usable */
1671 		xdst = container_of(oldflo, struct xfrm_dst, flo);
1672 		num_pols = xdst->num_pols;
1673 		num_xfrms = xdst->num_xfrms;
1675 		for (i = 0; i < num_pols; i++) {
1676 			pols[i] = xdst->pols[i];
1677 			pol_dead |= pols[i]->walk.dead;
1680 			dst_free(&xdst->u.dst);
1688 	/* Resolve policies to use if we couldn't get them from
1689 	 * previous cache entry */
1692 		pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
1693 		err = xfrm_expand_policies(fl, family, pols,
1694 					   &num_pols, &num_xfrms);
1700 			goto make_dummy_bundle;
1703 	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1704 	if (IS_ERR(new_xdst)) {
1705 		err = PTR_ERR(new_xdst);
1709 			goto make_dummy_bundle;
1710 		dst_hold(&xdst->u.dst);
1712 	} else if (new_xdst == NULL) {
1715 			goto make_dummy_bundle;
1716 		xdst->num_xfrms = 0;
1717 		dst_hold(&xdst->u.dst);
1721 	/* Kill the previous bundle */
1723 		/* The policies were stolen for newly generated bundle */
1725 		dst_free(&xdst->u.dst);
1728 	/* Flow cache does not have reference, it dst_free()'s,
1729 	 * but we do need to return one reference for original caller */
1730 	dst_hold(&new_xdst->u.dst);
1731 	return &new_xdst->flo;
1734 	/* We found policies, but there's no bundles to instantiate:
1735 	 * either because the policy blocks, has no transformations or
1736 	 * we could not build template (no xfrm_states).*/
1737 	xdst = xfrm_alloc_dst(net, family);
1739 		xfrm_pols_put(pols, num_pols);
1740 		return ERR_CAST(xdst);
1742 	xdst->num_pols = num_pols;
1743 	xdst->num_xfrms = num_xfrms;
1744 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1746 	dst_hold(&xdst->u.dst);
1750 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1753 		dst_free(&xdst->u.dst);
1755 	xfrm_pols_put(pols, num_pols);
1756 	return ERR_PTR(err);
1759 static struct dst_entry *make_blackhole(struct net *net, u16 family,
1760 					struct dst_entry *dst_orig)
1762 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1763 	struct dst_entry *ret;
1766 		dst_release(dst_orig);
1767 		return ERR_PTR(-EINVAL);
1769 		ret = afinfo->blackhole_route(net, dst_orig);
1771 	xfrm_policy_put_afinfo(afinfo);
1776 /* Main function: finds/creates a bundle for given flow.
1778 * At the moment we eat a raw IP route. Mostly to speed up lookups
1779 * on interfaces with disabled IPsec.
1781 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
1782 			      const struct flowi *fl,
1783 			      struct sock *sk, int flags)
1785 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1786 	struct flow_cache_object *flo;
1787 	struct xfrm_dst *xdst;
1788 	struct dst_entry *dst, *route;
1789 	u16 family = dst_orig->ops->family;
1790 	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
1791 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
1798 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
1800 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
1801 		err = xfrm_expand_policies(fl, family, pols,
1802 					   &num_pols, &num_xfrms);
1807 			if (num_xfrms <= 0) {
1808 				drop_pols = num_pols;
1812 			xdst = xfrm_resolve_and_create_bundle(
1816 				xfrm_pols_put(pols, num_pols);
1817 				err = PTR_ERR(xdst);
1819 			} else if (xdst == NULL) {
1821 				drop_pols = num_pols;
1825 			dst_hold(&xdst->u.dst);
1827 			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
1828 			xdst->u.dst.next = xfrm_policy_sk_bundles;
1829 			xfrm_policy_sk_bundles = &xdst->u.dst;
1830 			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
1832 			route = xdst->route;
1837 	/* To accelerate a bit... */
1838 	if ((dst_orig->flags & DST_NOXFRM) ||
1839 	    !net->xfrm.policy_count[XFRM_POLICY_OUT])
1842 	flo = flow_cache_lookup(net, fl, family, dir,
1843 				xfrm_bundle_lookup, dst_orig);
1850 	xdst = container_of(flo, struct xfrm_dst, flo);
1852 	num_pols = xdst->num_pols;
1853 	num_xfrms = xdst->num_xfrms;
1854 	memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
1855 	route = xdst->route;
1859 	if (route == NULL && num_xfrms > 0) {
1860 		/* The only case when xfrm_bundle_lookup() returns a
1861 		 * bundle with null route, is when the template could
1862 		 * not be resolved. It means policies are there, but
1863 		 * bundle could not be created, since we don't yet
1864 		 * have the xfrm_state's. We need to wait for KM to
1865 		 * negotiate new SA's or bail out with error.*/
1866 		if (net->xfrm.sysctl_larval_drop) {
1867 			/* EREMOTE tells the caller to generate
1868 			 * a one-shot blackhole route. */
1870 			xfrm_pols_put(pols, drop_pols);
1871 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1873 			return make_blackhole(net, family, dst_orig);
1875 		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
1876 			DECLARE_WAITQUEUE(wait, current);
1878 			add_wait_queue(&net->xfrm.km_waitq, &wait);
1879 			set_current_state(TASK_INTERRUPTIBLE);
1881 			set_current_state(TASK_RUNNING);
1882 			remove_wait_queue(&net->xfrm.km_waitq, &wait);
1884 			if (!signal_pending(current)) {
1893 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1901 	if ((flags & XFRM_LOOKUP_ICMP) &&
1902 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
1907 	for (i = 0; i < num_pols; i++)
1908 		pols[i]->curlft.use_time = get_seconds();
1910 	if (num_xfrms < 0) {
1911 		/* Prohibit the flow */
1912 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1915 	} else if (num_xfrms > 0) {
1916 		/* Flow transformed */
1917 		dst_release(dst_orig);
1919 		/* Flow passes untransformed */
1924 	xfrm_pols_put(pols, drop_pols);
1925 	if (dst && dst->xfrm &&
1926 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
1927 		dst->flags |= DST_XFRM_TUNNEL;
1931 	if (!(flags & XFRM_LOOKUP_ICMP)) {
1939 	dst_release(dst_orig);
1940 	xfrm_pols_put(pols, drop_pols);
1941 	return ERR_PTR(err);
1943 EXPORT_SYMBOL(xfrm_lookup);
1946 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
1948 	struct xfrm_state *x;
1950 	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
1952 	x = skb->sp->xvec[idx];
1953 	if (!x->type->reject)
1955 	return x->type->reject(x, skb, fl);
1958 /* When skb is transformed back to its "native" form, we have to
1959 * check policy restrictions. At the moment we make this in maximally
1960 * stupid way. Shame on me. :-) Of course, connected sockets must
1961 * have policy cached at them.
1965 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
1966 	      unsigned short family)
1968 	if (xfrm_state_kern(x))
1969 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
1970 	return	x->id.proto == tmpl->id.proto &&
1971 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
1972 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
1973 		x->props.mode == tmpl->mode &&
1974 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
1975 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
1976 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
1977 		  xfrm_state_addr_cmp(tmpl, x, family));
1981 * 0 or more than 0 is returned when validation succeeds (either bypass
1982 * because of optional transport mode, or the next index of the matched
1983 * secpath state with the template).
1984 * -1 is returned when no matching template is found.
1985 * Otherwise "-2 - errored_index" is returned.
1988 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
1989 	       unsigned short family)
1993 	if (tmpl->optional) {
1994 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
1998 	for (; idx < sp->len; idx++) {
1999 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2001 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2010 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2011 			  unsigned int family, int reverse)
2013 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2016 	if (unlikely(afinfo == NULL))
2017 		return -EAFNOSUPPORT;
2019 	afinfo->decode_session(skb, fl, reverse);
2020 	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2021 	xfrm_policy_put_afinfo(afinfo);
2024 EXPORT_SYMBOL(__xfrm_decode_session);
2026 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2028 	for (; k < sp->len; k++) {
2029 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
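/* __xfrm_policy_check() is the inbound enforcement point: it decodes the
 * flow from the skb, checks each SA on the secpath against its selector,
 * looks up the applicable policy (socket policy first, then the flow
 * cache), and verifies that the policy's templates are all satisfied by
 * the secpath before accepting the packet.
 */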
2038 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2039 			unsigned short family)
2041 	struct net *net = dev_net(skb->dev);
2042 	struct xfrm_policy *pol;
2043 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2052 	reverse = dir & ~XFRM_POLICY_MASK;
2053 	dir &= XFRM_POLICY_MASK;
2054 	fl_dir = policy_to_flow_dir(dir);
2056 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2057 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2061 	nf_nat_decode_session(skb, &fl, family);
2063 	/* First, check used SA against their selectors. */
2067 		for (i=skb->sp->len-1; i>=0; i--) {
2068 			struct xfrm_state *x = skb->sp->xvec[i];
2069 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2070 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2077 	if (sk && sk->sk_policy[dir]) {
2078 		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2080 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2086 		struct flow_cache_object *flo;
2088 		flo = flow_cache_lookup(net, &fl, family, fl_dir,
2089 					xfrm_policy_lookup, NULL);
2090 		if (IS_ERR_OR_NULL(flo))
2091 			pol = ERR_CAST(flo);
2093 			pol = container_of(flo, struct xfrm_policy, flo);
2097 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2102 		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2103 			xfrm_secpath_reject(xerr_idx, skb, &fl);
2104 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2110 	pol->curlft.use_time = get_seconds();
2114 #ifdef CONFIG_XFRM_SUB_POLICY
2115 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2116 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2120 		if (IS_ERR(pols[1])) {
2121 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2124 			pols[1]->curlft.use_time = get_seconds();
2130 	if (pol->action == XFRM_POLICY_ALLOW) {
2131 		struct sec_path *sp;
2132 		static struct sec_path dummy;
2133 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2134 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2135 		struct xfrm_tmpl **tpp = tp;
2139 		if ((sp = skb->sp) == NULL)
2142 		for (pi = 0; pi < npols; pi++) {
2143 			if (pols[pi] != pol &&
2144 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2145 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2148 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2149 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2152 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2153 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2157 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
2161 		/* For each tunnel xfrm, find the first matching tmpl.
2162 		 * For each tmpl before that, find corresponding xfrm.
2163 		 * Order is _important_. Later we will implement
2164 		 * some barriers, but at the moment barriers
2165 		 * are implied between each two transformations.
2167 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2168 			k = xfrm_policy_ok(tpp[i], sp, k, family);
2171 				/* "-2 - errored_index" returned */
2173 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2178 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2179 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2183 		xfrm_pols_put(pols, npols);
2186 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2189 	xfrm_secpath_reject(xerr_idx, skb, &fl);
2191 	xfrm_pols_put(pols, npols);
2194 EXPORT_SYMBOL(__xfrm_policy_check);
2196 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2198 	struct net *net = dev_net(skb->dev);
2200 	struct dst_entry *dst;
2203 	if (xfrm_decode_session(skb, &fl, family) < 0) {
2204 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2210 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2215 	skb_dst_set(skb, dst);
2218 EXPORT_SYMBOL(__xfrm_route_forward);
2220 /* Optimize later using cookies and generation ids. */
2222 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2224 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2225 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2226 * get validated by dst_ops->check on every use. We do this
2227 * because when a normal route referenced by an XFRM dst is
2228 * obsoleted we do not go looking around for all parent
2229 * referencing XFRM dsts so that we can invalidate them. It
2230 * is just too much work. Instead we make the checks here on
2231 * every use. For example:
2233 * XFRM dst A --> IPv4 dst X
2235 * X is the "xdst->route" of A (X is also the "dst->path" of A
2236 * in this example). If X is marked obsolete, "A" will not
2237 * notice. That's what we are validating here via the
2238 * stale_bundle() check.
2240 * When a policy's bundle is pruned, we dst_free() the XFRM
2241 * dst which causes it's ->obsolete field to be set to
2242 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
2243 * this, we want to force a new route lookup.
2245 	if (dst->obsolete < 0 && !stale_bundle(dst))
2251 static int stale_bundle(struct dst_entry *dst)
2253 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2256 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2258 	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2259 		dst->dev = dev_net(dev)->loopback_dev;
2264 EXPORT_SYMBOL(xfrm_dst_ifdown);
2266 static void xfrm_link_failure(struct sk_buff *skb)
2268 	/* Impossible. Such dst must be popped before reaches point of failure. */
2271 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2274 	if (dst->obsolete) {
2282 static void __xfrm_garbage_collect(struct net *net)
2284 	struct dst_entry *head, *next;
2286 	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2287 	head = xfrm_policy_sk_bundles;
2288 	xfrm_policy_sk_bundles = NULL;
2289 	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2298 static void xfrm_garbage_collect(struct net *net)
2301 	__xfrm_garbage_collect(net);
2304 static void xfrm_garbage_collect_deferred(struct net *net)
2306 	flow_cache_flush_deferred();
2307 	__xfrm_garbage_collect(net);
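/* xfrm_init_pmtu() walks the bundle from the outermost xfrm_dst down to the
 * underlying route, caching each level's child and route MTUs and setting
 * each entry's MTU metric to the smaller of the transform-adjusted child
 * MTU and the route MTU.
 */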
2310 static void xfrm_init_pmtu(struct dst_entry *dst)
2313 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2314 		u32 pmtu, route_mtu_cached;
2316 		pmtu = dst_mtu(dst->child);
2317 		xdst->child_mtu_cached = pmtu;
2319 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2321 		route_mtu_cached = dst_mtu(xdst->route);
2322 		xdst->route_mtu_cached = route_mtu_cached;
2324 		if (pmtu > route_mtu_cached)
2325 			pmtu = route_mtu_cached;
2327 		dst_metric_set(dst, RTAX_MTU, pmtu);
2328 	} while ((dst = dst->next));
2331 /* Check that the bundle accepts the flow and its components are
2335 static int xfrm_bundle_ok(struct xfrm_dst *first)
2337 	struct dst_entry *dst = &first->u.dst;
2338 	struct xfrm_dst *last;
2341 	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2342 	    (dst->dev && !netif_running(dst->dev)))
2348 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2350 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2352 		if (xdst->xfrm_genid != dst->xfrm->genid)
2354 		if (xdst->num_pols > 0 &&
2355 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2358 		mtu = dst_mtu(dst->child);
2359 		if (xdst->child_mtu_cached != mtu) {
2361 			xdst->child_mtu_cached = mtu;
2364 		if (!dst_check(xdst->route, xdst->route_cookie))
2366 		mtu = dst_mtu(xdst->route);
2367 		if (xdst->route_mtu_cached != mtu) {
2369 			xdst->route_mtu_cached = mtu;
2373 	} while (dst->xfrm);
2378 	mtu = last->child_mtu_cached;
2382 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2383 		if (mtu > last->route_mtu_cached)
2384 			mtu = last->route_mtu_cached;
2385 		dst_metric_set(dst, RTAX_MTU, mtu);
2390 		last = (struct xfrm_dst *)last->u.dst.next;
2391 		last->child_mtu_cached = mtu;
2397 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2399 	return dst_metric_advmss(dst->path);
2402 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2404 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2406 	return mtu ? : dst_mtu(dst->path);
2409 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2410 					   struct sk_buff *skb,
2413 	return dst->path->ops->neigh_lookup(dst, skb, daddr);
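/* xfrm_policy_register_afinfo() registers the per-address-family policy
 * operations and fills in any dst_ops callbacks the family left NULL with
 * the generic xfrm implementations above (check, mtu, advmss, neighbour
 * lookup, garbage collection), then copies the dst_ops template into each
 * existing namespace.
 */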
2416 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2420 	if (unlikely(afinfo == NULL))
2422 	if (unlikely(afinfo->family >= NPROTO))
2423 		return -EAFNOSUPPORT;
2424 	write_lock_bh(&xfrm_policy_afinfo_lock);
2425 	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2428 		struct dst_ops *dst_ops = afinfo->dst_ops;
2429 		if (likely(dst_ops->kmem_cachep == NULL))
2430 			dst_ops->kmem_cachep = xfrm_dst_cache;
2431 		if (likely(dst_ops->check == NULL))
2432 			dst_ops->check = xfrm_dst_check;
2433 		if (likely(dst_ops->default_advmss == NULL))
2434 			dst_ops->default_advmss = xfrm_default_advmss;
2435 		if (likely(dst_ops->mtu == NULL))
2436 			dst_ops->mtu = xfrm_mtu;
2437 		if (likely(dst_ops->negative_advice == NULL))
2438 			dst_ops->negative_advice = xfrm_negative_advice;
2439 		if (likely(dst_ops->link_failure == NULL))
2440 			dst_ops->link_failure = xfrm_link_failure;
2441 		if (likely(dst_ops->neigh_lookup == NULL))
2442 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2443 		if (likely(afinfo->garbage_collect == NULL))
2444 			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2445 		xfrm_policy_afinfo[afinfo->family] = afinfo;
2447 	write_unlock_bh(&xfrm_policy_afinfo_lock);
2451 		struct dst_ops *xfrm_dst_ops;
2453 		switch (afinfo->family) {
2455 			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2457 #if IS_ENABLED(CONFIG_IPV6)
2459 			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2465 		*xfrm_dst_ops = *afinfo->dst_ops;
2471 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2473 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2476 	if (unlikely(afinfo == NULL))
2478 	if (unlikely(afinfo->family >= NPROTO))
2479 		return -EAFNOSUPPORT;
2480 	write_lock_bh(&xfrm_policy_afinfo_lock);
2481 	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2482 		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2485 			struct dst_ops *dst_ops = afinfo->dst_ops;
2486 			xfrm_policy_afinfo[afinfo->family] = NULL;
2487 			dst_ops->kmem_cachep = NULL;
2488 			dst_ops->check = NULL;
2489 			dst_ops->negative_advice = NULL;
2490 			dst_ops->link_failure = NULL;
2491 			afinfo->garbage_collect = NULL;
2494 	write_unlock_bh(&xfrm_policy_afinfo_lock);
2497 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2499 static void __net_init xfrm_dst_ops_init(struct net *net)
2501 	struct xfrm_policy_afinfo *afinfo;
2503 	read_lock_bh(&xfrm_policy_afinfo_lock);
2504 	afinfo = xfrm_policy_afinfo[AF_INET];
2506 		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2507 #if IS_ENABLED(CONFIG_IPV6)
2508 	afinfo = xfrm_policy_afinfo[AF_INET6];
2510 		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2512 	read_unlock_bh(&xfrm_policy_afinfo_lock);
2515 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2517 	struct xfrm_policy_afinfo *afinfo;
2518 	if (unlikely(family >= NPROTO))
2520 	read_lock(&xfrm_policy_afinfo_lock);
2521 	afinfo = xfrm_policy_afinfo[family];
2522 	if (unlikely(!afinfo))
2523 		read_unlock(&xfrm_policy_afinfo_lock);
2527 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
2529 	read_unlock(&xfrm_policy_afinfo_lock);
2532 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2534 	struct net_device *dev = ptr;
2538 		xfrm_garbage_collect(dev_net(dev));
2543 static struct notifier_block xfrm_dev_notifier = {
2544 	.notifier_call = xfrm_dev_event,
2547 #ifdef CONFIG_XFRM_STATISTICS
2548 static int __net_init xfrm_statistics_init(struct net *net)
2552 	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2553 			  sizeof(struct linux_xfrm_mib),
2554 			  __alignof__(struct linux_xfrm_mib)) < 0)
2556 	rv = xfrm_proc_init(net);
2558 		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2562 static void xfrm_statistics_fini(struct net *net)
2564 	xfrm_proc_fini(net);
2565 	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2568 static int __net_init xfrm_statistics_init(struct net *net)
2573 static void xfrm_statistics_fini(struct net *net)
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}
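
/*
 * Per-namespace teardown: flush any remaining SUB and MAIN policies (with a
 * dummy audit record), then sanity-check that all lists and hash chains are
 * empty before freeing the tables allocated in xfrm_policy_init().
 */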
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
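
/*
 * Namespace bring-up order matters here: statistics, then state, then
 * policy, then the dst_ops copy and sysctls; the error path unwinds in the
 * reverse order via the labels below.
 */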
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}
static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}
static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}
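
/*
 * Audit helpers: xfrm_audit_common_policyinfo() appends the security
 * context and the selector addresses/prefix lengths of a policy to an
 * audit record; the SPD-add/SPD-delete wrappers below use it.
 */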
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif /* CONFIG_AUDITSYSCALL */
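
/*
 * CONFIG_XFRM_MIGRATE: helpers for moving existing policies and states to
 * new endpoint addresses; the result is announced to key managers through
 * km_migrate() at the end of xfrm_migrate().
 */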
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}
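
/*
 * Look up the policy to be migrated: first in the by-destination hash for
 * the given direction, then in the inexact list, taking the best-priority
 * match and holding a reference on it.
 */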
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
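
/*
 * Decide whether a migrate entry refers to a given policy template: mode,
 * protocol and (optional) reqid must agree, and for tunnel/BEET mode the
 * old endpoint addresses must match as well.
 */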
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
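
/*
 * Validate a migrate request before touching anything: a sane entry count,
 * a real address change per entry, no wildcard new addresses and no
 * duplicated entries.
 */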
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
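
/*
 * Top-level migration entry point; the numbered stages below find the
 * policy, clone and re-target the matching states, rewrite the policy
 * templates, delete the old states and finally announce the change.
 */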
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);