[PATCH] slab: remove kmem_cache_t
net/ipv6/xfrm6_tunnel.c
/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors	Mitsuru KANDA <mk@linux-ipv6.org>
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 *
 */
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>

/*
 * xfrm_tunnel_spi things are for allocating unique id ("spi")
 * per xfrm_address_t.
 */
struct xfrm6_tunnel_spi {
	struct hlist_node	list_byaddr;
	struct hlist_node	list_byspi;
	xfrm_address_t		addr;
	u32			spi;
	atomic_t		refcnt;
};

static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);

static u32 xfrm6_tunnel_spi;

#define XFRM6_TUNNEL_SPI_MIN	1
#define XFRM6_TUNNEL_SPI_MAX	0xffffffff

static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];

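/*
 * Hash an IPv6 address into the byaddr table: XOR the four 32-bit
 * words of the address together, fold the upper bits down, and mask
 * to the table size (a power of two).
 */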
static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
	unsigned h;

	h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
	h ^= h >> 16;
	h ^= h >> 8;
	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

	return h;
}

static unsigned inline xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}

static int xfrm6_tunnel_spi_init(void)
{
	int i;

	xfrm6_tunnel_spi = 0;
	xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
						  sizeof(struct xfrm6_tunnel_spi),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL, NULL);
	if (!xfrm6_tunnel_spi_kmem)
		return -ENOMEM;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
	return 0;
}

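/*
 * Destroy the SPI cache, but only once both hash tables are empty;
 * if any entries remain the cache is left in place.
 */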
static void xfrm6_tunnel_spi_fini(void)
{
	int i;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
			return;
	}
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
			return;
	}
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
	xfrm6_tunnel_spi_kmem = NULL;
}

static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos;

	hlist_for_each_entry(x6spi, pos,
			     &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			     list_byaddr) {
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
			return x6spi;
	}

	return NULL;
}

__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	read_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	spi = x6spi ? x6spi->spi : 0;
	read_unlock_bh(&xfrm6_tunnel_spi_lock);
	return htonl(spi);
}

EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);

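/*
 * Allocate an unused SPI: search upwards from the last value handed out,
 * wrapping around to XFRM6_TUNNEL_SPI_MIN if the top of the range is
 * reached.  Returns 0 if no SPI is available or the cache allocation
 * fails.  Called with xfrm6_tunnel_spi_lock held for writing.
 */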
static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	u32 spi;
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos;
	unsigned index;

	if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
	    xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
		xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
	else
		xfrm6_tunnel_spi++;

	for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
		index = xfrm6_tunnel_spi_hash_byspi(spi);
		hlist_for_each_entry(x6spi, pos,
				     &xfrm6_tunnel_spi_byspi[index],
				     list_byspi) {
			if (x6spi->spi == spi)
				goto try_next_1;
		}
		xfrm6_tunnel_spi = spi;
		goto alloc_spi;
try_next_1:;
	}
	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
		index = xfrm6_tunnel_spi_hash_byspi(spi);
		hlist_for_each_entry(x6spi, pos,
				     &xfrm6_tunnel_spi_byspi[index],
				     list_byspi) {
			if (x6spi->spi == spi)
				goto try_next_2;
		}
		xfrm6_tunnel_spi = spi;
		goto alloc_spi;
try_next_2:;
	}
	spi = 0;
	goto out;
alloc_spi:
	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
	if (!x6spi)
		goto out;

	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
	x6spi->spi = spi;
	atomic_set(&x6spi->refcnt, 1);

	hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);

	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
	hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
out:
	return spi;
}

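/*
 * Return the SPI associated with @saddr, allocating a new one if needed.
 * An existing entry just gets its reference count bumped.
 */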
__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	write_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	if (x6spi) {
		atomic_inc(&x6spi->refcnt);
		spi = x6spi->spi;
	} else
		spi = __xfrm6_tunnel_alloc_spi(saddr);
	write_unlock_bh(&xfrm6_tunnel_spi_lock);

	return htonl(spi);
}

EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);

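/*
 * Drop a reference on the SPI entry for @saddr; the entry is unhashed
 * and freed when the last reference goes away.
 */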
void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos, *n;

	write_lock_bh(&xfrm6_tunnel_spi_lock);

	hlist_for_each_entry_safe(x6spi, pos, n,
				  &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
				  list_byaddr)
	{
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
			if (atomic_dec_and_test(&x6spi->refcnt)) {
				hlist_del(&x6spi->list_byaddr);
				hlist_del(&x6spi->list_byspi);
				kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
				break;
			}
		}
	}
	write_unlock_bh(&xfrm6_tunnel_spi_lock);
}

EXPORT_SYMBOL(xfrm6_tunnel_free_spi);

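/*
 * The outer IPv6 header has already been built by tunnel mode output;
 * all that is left to do here is to fill in its payload length.
 */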
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;

	top_iph = (struct ipv6hdr *)skb->data;
	top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

	return 0;
}

static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return 0;
}

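/*
 * IPv6-in-IPv6 packets carry no SPI on the wire; derive one from the
 * outer source address and hand the packet to the generic SPI input path.
 */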
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb->nh.ipv6h;
	__be32 spi;

	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
	return xfrm6_rcv_spi(skb, spi);
}

static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			    int type, int code, int offset, __be32 info)
{
	/* xfrm6_tunnel native err handling */
	switch (type) {
	case ICMPV6_DEST_UNREACH:
		switch (code) {
		case ICMPV6_NOROUTE:
		case ICMPV6_ADM_PROHIBITED:
		case ICMPV6_NOT_NEIGHBOUR:
		case ICMPV6_ADDR_UNREACH:
		case ICMPV6_PORT_UNREACH:
		default:
			break;
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		break;
	case ICMPV6_TIME_EXCEED:
		switch (code) {
		case ICMPV6_EXC_HOPLIMIT:
			break;
		case ICMPV6_EXC_FRAGTIME:
		default:
			break;
		}
		break;
	case ICMPV6_PARAMPROB:
		switch (code) {
		case ICMPV6_HDR_FIELD: break;
		case ICMPV6_UNK_NEXTHDR: break;
		case ICMPV6_UNK_OPTION: break;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
	if (x->props.mode != XFRM_MODE_TUNNEL)
		return -EINVAL;

	if (x->encap)
		return -EINVAL;

	x->props.header_len = sizeof(struct ipv6hdr);

	return 0;
}

static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
	xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
}

static struct xfrm_type xfrm6_tunnel_type = {
	.description	= "IP6IP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_IPV6,
	.init_state	= xfrm6_tunnel_init_state,
	.destructor	= xfrm6_tunnel_destroy,
	.input		= xfrm6_tunnel_input,
	.output		= xfrm6_tunnel_output,
};

static struct xfrm6_tunnel xfrm6_tunnel_handler = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 2,
};

static int __init xfrm6_tunnel_init(void)
{
	if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
		return -EAGAIN;

	if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) {
		xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
		return -EAGAIN;
	}
	if (xfrm6_tunnel_spi_init() < 0) {
		xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
		xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
		return -EAGAIN;
	}
	return 0;
}

static void __exit xfrm6_tunnel_fini(void)
{
	xfrm6_tunnel_spi_fini();
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
}

module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");