net: fix hw_features ethtool_ops->set_flags compatibility
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv4 / xfrm4_policy.c
... / ...
CommitLineData
1/*
2 * xfrm4_policy.c
3 *
4 * Changes:
5 * Kazunori MIYAZAWA @USAGI
6 * YOSHIFUJI Hideaki @USAGI
7 * Split up af-specific portion
8 *
9 */
10
11#include <linux/err.h>
12#include <linux/kernel.h>
13#include <linux/inetdevice.h>
14#include <linux/if_tunnel.h>
15#include <net/dst.h>
16#include <net/xfrm.h>
17#include <net/ip.h>
18
19static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
20
21static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
22 const xfrm_address_t *saddr,
23 const xfrm_address_t *daddr)
24{
25 struct flowi4 fl4 = {
26 .daddr = daddr->a4,
27 .flowi4_tos = tos,
28 };
29 struct rtable *rt;
30
31 if (saddr)
32 fl4.saddr = saddr->a4;
33
34 rt = __ip_route_output_key(net, &fl4);
35 if (!IS_ERR(rt))
36 return &rt->dst;
37
38 return ERR_CAST(rt);
39}
40
41static int xfrm4_get_saddr(struct net *net,
42 xfrm_address_t *saddr, xfrm_address_t *daddr)
43{
44 struct dst_entry *dst;
45 struct rtable *rt;
46
47 dst = xfrm4_dst_lookup(net, 0, NULL, daddr);
48 if (IS_ERR(dst))
49 return -EHOSTUNREACH;
50
51 rt = (struct rtable *)dst;
52 saddr->a4 = rt->rt_src;
53 dst_release(dst);
54 return 0;
55}
56
57static int xfrm4_get_tos(const struct flowi *fl)
58{
59 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; /* Strip ECN bits */
60}
61
/* Per-family path init hook: IPv4 needs no extra header-length accounting
 * when building a bundle, so this is a no-op that always succeeds. */
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	return 0;
}
67
/* Populate the xfrm_dst's embedded rtable from the route (xdst->route) the
 * bundle was built on, so the bundle behaves like a normal IPv4 route to
 * the rest of the stack.  Always returns 0. */
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
			  const struct flowi *fl)
{
	struct rtable *rt = (struct rtable *)xdst->route;
	const struct flowi4 *fl4 = &fl->u.ip4;

	/* Mirror the flow key into the inner route so cache lookups match. */
	rt->rt_key_dst = fl4->daddr;
	rt->rt_key_src = fl4->saddr;
	rt->rt_tos = fl4->flowi4_tos;
	rt->rt_route_iif = fl4->flowi4_iif;
	rt->rt_iif = fl4->flowi4_iif;
	rt->rt_oif = fl4->flowi4_oif;
	rt->rt_mark = fl4->flowi4_mark;

	/* The bundle holds its own reference on the output device. */
	xdst->u.dst.dev = dev;
	dev_hold(dev);

	/* Share the route's inet_peer; take a reference since both routes
	 * now point at it (dropped again in xfrm4_dst_destroy()). */
	xdst->u.rt.peer = rt->peer;
	if (rt->peer)
		atomic_inc(&rt->peer->refcnt);

	/* Copy the route attributes, keeping only the cast flags that matter
	 * for delivery decisions on the bundle. */
	xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
					      RTCF_LOCAL);
	xdst->u.rt.rt_type = rt->rt_type;
	xdst->u.rt.rt_src = rt->rt_src;
	xdst->u.rt.rt_dst = rt->rt_dst;
	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_spec_dst = rt->rt_spec_dst;

	return 0;
}
101
102static void
103_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104{
105 struct iphdr *iph = ip_hdr(skb);
106 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
107 struct flowi4 *fl4 = &fl->u.ip4;
108
109 memset(fl4, 0, sizeof(struct flowi4));
110 fl4->flowi4_mark = skb->mark;
111
112 if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
113 switch (iph->protocol) {
114 case IPPROTO_UDP:
115 case IPPROTO_UDPLITE:
116 case IPPROTO_TCP:
117 case IPPROTO_SCTP:
118 case IPPROTO_DCCP:
119 if (xprth + 4 < skb->data ||
120 pskb_may_pull(skb, xprth + 4 - skb->data)) {
121 __be16 *ports = (__be16 *)xprth;
122
123 fl4->fl4_sport = ports[!!reverse];
124 fl4->fl4_dport = ports[!reverse];
125 }
126 break;
127
128 case IPPROTO_ICMP:
129 if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
130 u8 *icmp = xprth;
131
132 fl4->fl4_icmp_type = icmp[0];
133 fl4->fl4_icmp_code = icmp[1];
134 }
135 break;
136
137 case IPPROTO_ESP:
138 if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
139 __be32 *ehdr = (__be32 *)xprth;
140
141 fl4->fl4_ipsec_spi = ehdr[0];
142 }
143 break;
144
145 case IPPROTO_AH:
146 if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
147 __be32 *ah_hdr = (__be32*)xprth;
148
149 fl4->fl4_ipsec_spi = ah_hdr[1];
150 }
151 break;
152
153 case IPPROTO_COMP:
154 if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
155 __be16 *ipcomp_hdr = (__be16 *)xprth;
156
157 fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
158 }
159 break;
160
161 case IPPROTO_GRE:
162 if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
163 __be16 *greflags = (__be16 *)xprth;
164 __be32 *gre_hdr = (__be32 *)xprth;
165
166 if (greflags[0] & GRE_KEY) {
167 if (greflags[0] & GRE_CSUM)
168 gre_hdr++;
169 fl4->fl4_gre_key = gre_hdr[1];
170 }
171 }
172 break;
173
174 default:
175 fl4->fl4_ipsec_spi = 0;
176 break;
177 }
178 }
179 fl4->flowi4_proto = iph->protocol;
180 fl4->daddr = reverse ? iph->saddr : iph->daddr;
181 fl4->saddr = reverse ? iph->daddr : iph->saddr;
182 fl4->flowi4_tos = iph->tos;
183}
184
/* dst_ops garbage collector: ask the xfrm core to shrink this namespace's
 * bundle cache, then report pressure (nonzero) when the entry count still
 * exceeds twice the configured threshold. */
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
	/* ops is embedded in struct net, so recover the owning namespace. */
	struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);

	xfrm4_policy_afinfo.garbage_collect(net);
	return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
}
192
193static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
194{
195 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
196 struct dst_entry *path = xdst->route;
197
198 path->ops->update_pmtu(path, mtu);
199}
200
/* Final teardown of an IPv4 xfrm bundle dst: release metrics, drop the
 * inet_peer reference taken in xfrm4_fill_dst(), then hand the rest of the
 * cleanup to the generic xfrm dst destructor. */
static void xfrm4_dst_destroy(struct dst_entry *dst)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

	dst_destroy_metrics_generic(dst);

	if (likely(xdst->u.rt.peer))
		inet_putpeer(xdst->u.rt.peer);

	xfrm_dst_destroy(xdst);
}
212
/* Device teardown notification: only a full device unregister requires the
 * bundle's device references to be rehomed; a plain down event is ignored. */
static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			     int unregister)
{
	if (unregister)
		xfrm_dst_ifdown(dst, dev);
}
221
/* dst_entry operations for IPv4 xfrm bundles; wired into the afinfo below
 * and into each per-namespace xfrm.xfrm4_dst_ops copy. */
static struct dst_ops xfrm4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			xfrm4_garbage_collect,
	.update_pmtu =		xfrm4_update_pmtu,
	.cow_metrics =		dst_cow_metrics_generic,
	.destroy =		xfrm4_dst_destroy,
	.ifdown =		xfrm4_dst_ifdown,
	.local_out =		__ip_local_out,
	.gc_thresh =		1024,	/* replaced in xfrm4_init() */
};
233
/* IPv4 address-family hooks handed to the xfrm policy core by
 * xfrm4_policy_init(). */
static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.family = 		AF_INET,
	.dst_ops =		&xfrm4_dst_ops,
	.dst_lookup =		xfrm4_dst_lookup,
	.get_saddr =		xfrm4_get_saddr,
	.decode_session =	_decode_session4,
	.get_tos =		xfrm4_get_tos,
	.init_path =		xfrm4_init_path,
	.fill_dst =		xfrm4_fill_dst,
	.blackhole_route =	ipv4_blackhole_route,
};
245
#ifdef CONFIG_SYSCTL
/* net.ipv4.xfrm4_gc_thresh: tunable GC threshold; note it points at the
 * init namespace's dst_ops copy, so it only affects init_net. */
static struct ctl_table xfrm4_policy_table[] = {
	{
		.procname       = "xfrm4_gc_thresh",
		.data           = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
	{ }
};

/* Handle for unregistering the table above in xfrm4_policy_fini(). */
static struct ctl_table_header *sysctl_hdr;
#endif
260
/* Register the IPv4 address-family hooks with the xfrm policy core. */
static void __init xfrm4_policy_init(void)
{
	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
}
265
/* Teardown: remove the sysctl table (if it was registered) and unhook the
 * IPv4 afinfo from the xfrm policy core. */
static void __exit xfrm4_policy_fini(void)
{
#ifdef CONFIG_SYSCTL
	if (sysctl_hdr)
		unregister_net_sysctl_table(sysctl_hdr);
#endif
	xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
274
/* Boot-time initialization of IPv4 xfrm: size the GC threshold from the
 * route table, set up dst accounting, then register state/policy hooks
 * and (optionally) the sysctl table for init_net. */
void __init xfrm4_init(int rt_max_size)
{
	/*
	 * Select a default value for the gc_thresh based on the main route
	 * table hash size.  It seems to me the worst case scenario is when
	 * we have ipsec operating in transport mode, in which we create a
	 * dst_entry per socket.  The xfrm gc algorithm starts trying to remove
	 * entries at gc_thresh, and prevents new allocations as 2*gc_thresh
	 * so lets set an initial xfrm gc_thresh value at the rt_max_size/2.
	 * That will let us store an ipsec connection per route table entry,
	 * and start cleaning when were 1/2 full
	 */
	xfrm4_dst_ops.gc_thresh = rt_max_size/2;
	dst_entries_init(&xfrm4_dst_ops);

	xfrm4_state_init();
	xfrm4_policy_init();
#ifdef CONFIG_SYSCTL
	sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
						xfrm4_policy_table);
#endif
}
297