net/netfilter/ipvs/ip_vs_core.c
1 /*
2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
6 * cluster of servers.
7 *
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19 * and others.
20 *
21 * Changes:
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
24 *
25 */
26
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
45
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54
55 #include <net/ip_vs.h>
56
57
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71
72 int ip_vs_net_id __read_mostly;
73 #ifdef IP_VS_GENERIC_NETNS
74 EXPORT_SYMBOL(ip_vs_net_id);
75 #endif
76 /* netns cnt used for uniqueness */
77 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
78
79 /* ID used in ICMP lookups */
80 #define icmp_id(icmph) (((icmph)->un).echo.id)
81 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
82
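/*
 * Return a printable name for an IP protocol number. Unknown protocols
 * are formatted into a shared static buffer, so that path is not
 * re-entrant.
 */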
83 const char *ip_vs_proto_name(unsigned proto)
84 {
85 static char buf[20];
86
87 switch (proto) {
88 case IPPROTO_IP:
89 return "IP";
90 case IPPROTO_UDP:
91 return "UDP";
92 case IPPROTO_TCP:
93 return "TCP";
94 case IPPROTO_SCTP:
95 return "SCTP";
96 case IPPROTO_ICMP:
97 return "ICMP";
98 #ifdef CONFIG_IP_VS_IPV6
99 case IPPROTO_ICMPV6:
100 return "ICMPv6";
101 #endif
102 default:
103 sprintf(buf, "IP_%d", proto);
104 return buf;
105 }
106 }
107
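/* Initialize every row of a hash table to an empty list head. */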
108 void ip_vs_init_hash_table(struct list_head *table, int rows)
109 {
110 while (--rows >= 0)
111 INIT_LIST_HEAD(&table[rows]);
112 }
113
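/*
 * Account an incoming packet against the per-CPU stats of the real
 * server, its virtual service and the netns totals.
 */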
114 static inline void
115 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
116 {
117 struct ip_vs_dest *dest = cp->dest;
118 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
119
120 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
121 struct ip_vs_cpu_stats *s;
122
123 s = this_cpu_ptr(dest->stats.cpustats);
124 s->ustats.inpkts++;
125 u64_stats_update_begin(&s->syncp);
126 s->ustats.inbytes += skb->len;
127 u64_stats_update_end(&s->syncp);
128
129 s = this_cpu_ptr(dest->svc->stats.cpustats);
130 s->ustats.inpkts++;
131 u64_stats_update_begin(&s->syncp);
132 s->ustats.inbytes += skb->len;
133 u64_stats_update_end(&s->syncp);
134
135 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
136 s->ustats.inpkts++;
137 u64_stats_update_begin(&s->syncp);
138 s->ustats.inbytes += skb->len;
139 u64_stats_update_end(&s->syncp);
140 }
141 }
142
143
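/*
 * Account an outgoing packet against the same three per-CPU counters,
 * this time on the outpkts/outbytes side.
 */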
144 static inline void
145 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
146 {
147 struct ip_vs_dest *dest = cp->dest;
148 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
149
150 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
151 struct ip_vs_cpu_stats *s;
152
153 s = this_cpu_ptr(dest->stats.cpustats);
154 s->ustats.outpkts++;
155 u64_stats_update_begin(&s->syncp);
156 s->ustats.outbytes += skb->len;
157 u64_stats_update_end(&s->syncp);
158
159 s = this_cpu_ptr(dest->svc->stats.cpustats);
160 s->ustats.outpkts++;
161 u64_stats_update_begin(&s->syncp);
162 s->ustats.outbytes += skb->len;
163 u64_stats_update_end(&s->syncp);
164
165 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
166 s->ustats.outpkts++;
167 u64_stats_update_begin(&s->syncp);
168 s->ustats.outbytes += skb->len;
169 u64_stats_update_end(&s->syncp);
170 }
171 }
172
173
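/* Count a new connection for the real server, the service and the netns. */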
174 static inline void
175 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
176 {
177 struct netns_ipvs *ipvs = net_ipvs(svc->net);
178 struct ip_vs_cpu_stats *s;
179
180 s = this_cpu_ptr(cp->dest->stats.cpustats);
181 s->ustats.conns++;
182
183 s = this_cpu_ptr(svc->stats.cpustats);
184 s->ustats.conns++;
185
186 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
187 s->ustats.conns++;
188 }
189
190
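/* Let the protocol update the connection state, if it implements it. */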
191 static inline int
192 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
193 const struct sk_buff *skb,
194 struct ip_vs_proto_data *pd)
195 {
196 if (unlikely(!pd->pp->state_transition))
197 return 0;
198 return pd->pp->state_transition(cp, direction, skb, pd);
199 }
200
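/*
 * Fill the connection template parameters for a persistent service and
 * let the persistence engine, if one is attached, pull its data from
 * the skb.
 */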
201 static inline int
202 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
203 struct sk_buff *skb, int protocol,
204 const union nf_inet_addr *caddr, __be16 cport,
205 const union nf_inet_addr *vaddr, __be16 vport,
206 struct ip_vs_conn_param *p)
207 {
208 ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
209 vport, p);
210 p->pe = svc->pe;
211 if (p->pe && p->pe->fill_param)
212 return p->pe->fill_param(p, skb);
213
214 return 0;
215 }
216
217 /*
218 * IPVS persistent scheduling function
219 * It creates a connection entry according to its template if one exists,
220 * or selects a server and creates a connection entry plus a template.
221 * Locking: we are svc user (svc->refcnt), so we hold all dests too
222 * Protocols supported: TCP, UDP
223 */
224 static struct ip_vs_conn *
225 ip_vs_sched_persist(struct ip_vs_service *svc,
226 struct sk_buff *skb,
227 __be16 src_port, __be16 dst_port, int *ignored)
228 {
229 struct ip_vs_conn *cp = NULL;
230 struct ip_vs_iphdr iph;
231 struct ip_vs_dest *dest;
232 struct ip_vs_conn *ct;
233 __be16 dport = 0; /* destination port to forward */
234 unsigned int flags;
235 struct ip_vs_conn_param param;
236 union nf_inet_addr snet; /* source network of the client,
237 after masking */
238
239 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
240
241 /* Mask saddr with the netmask to adjust template granularity */
242 #ifdef CONFIG_IP_VS_IPV6
243 if (svc->af == AF_INET6)
244 ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask);
245 else
246 #endif
247 snet.ip = iph.saddr.ip & svc->netmask;
248
249 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
250 "mnet %s\n",
251 IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
252 IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
253 IP_VS_DBG_ADDR(svc->af, &snet));
254
255 /*
256 * FTP is a complicated protocol that uses a control connection and
257 * separate data connections. For active FTP, the FTP server initiates
258 * the data connection to the client, usually from source port 20. For
259 * passive FTP, the FTP server tells the client which port it passively
260 * listens on, and the client opens the data connection. In tunneling or
261 * direct routing mode, the load balancer only sees the client-to-server
262 * half of the connection, so that port number is unknown to it. So, a
263 * conn template like <caddr, 0, vaddr, 0, daddr, 0> is created for a
264 * persistent FTP service, and a template like
265 * <caddr, 0, vaddr, vport, daddr, dport> is created for other
266 * persistent services.
267 */
268 {
269 int protocol = iph.protocol;
270 const union nf_inet_addr *vaddr = &iph.daddr;
271 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
272 __be16 vport = 0;
273
274 if (dst_port == svc->port) {
275 /* non-FTP template:
276 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
277 * FTP template:
278 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
279 */
280 if (svc->port != FTPPORT)
281 vport = dst_port;
282 } else {
283 /* Note: persistent fwmark-based services and
284 * persistent port zero service are handled here.
285 * fwmark template:
286 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
287 * port zero template:
288 * <protocol,caddr,0,vaddr,0,daddr,0>
289 */
290 if (svc->fwmark) {
291 protocol = IPPROTO_IP;
292 vaddr = &fwmark;
293 }
294 }
295 /* return *ignored = -1 so NF_DROP can be used */
296 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
297 vaddr, vport, &param) < 0) {
298 *ignored = -1;
299 return NULL;
300 }
301 }
302
303 /* Check if a template already exists */
304 ct = ip_vs_ct_in_get(&param);
305 if (!ct || !ip_vs_check_template(ct)) {
306 /*
307 * No template found or the dest of the connection
308 * template is not available.
309 * return *ignored=0 i.e. ICMP and NF_DROP
310 */
311 dest = svc->scheduler->schedule(svc, skb);
312 if (!dest) {
313 IP_VS_DBG(1, "p-schedule: no dest found.\n");
314 kfree(param.pe_data);
315 *ignored = 0;
316 return NULL;
317 }
318
319 if (dst_port == svc->port && svc->port != FTPPORT)
320 dport = dest->port;
321
322 /* Create a template
323 * This adds param.pe_data to the template,
324 * and thus param.pe_data will be destroyed
325 * when the template expires */
326 ct = ip_vs_conn_new(&param, &dest->addr, dport,
327 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
328 if (ct == NULL) {
329 kfree(param.pe_data);
330 *ignored = -1;
331 return NULL;
332 }
333
334 ct->timeout = svc->timeout;
335 } else {
336 /* set destination with the found template */
337 dest = ct->dest;
338 kfree(param.pe_data);
339 }
340
341 dport = dst_port;
342 if (dport == svc->port && dest->port)
343 dport = dest->port;
344
345 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
346 && iph.protocol == IPPROTO_UDP)?
347 IP_VS_CONN_F_ONE_PACKET : 0;
348
349 /*
350 * Create a new connection according to the template
351 */
352 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
353 src_port, &iph.daddr, dst_port, &param);
354
355 cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
356 if (cp == NULL) {
357 ip_vs_conn_put(ct);
358 *ignored = -1;
359 return NULL;
360 }
361
362 /*
363 * Add its control
364 */
365 ip_vs_control_add(cp, ct);
366 ip_vs_conn_put(ct);
367
368 ip_vs_conn_stats(cp, svc);
369 return cp;
370 }
371
372
373 /*
374 * IPVS main scheduling function
375 * It selects a server according to the virtual service, and
376 * creates a connection entry.
377 * Protocols supported: TCP, UDP
378 *
379 * Usage of *ignored
380 *
381 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
382 * svc/scheduler decides that this packet should be accepted with
383 * NF_ACCEPT because it must not be scheduled.
384 *
385 * 0 : scheduler cannot find a destination, so try bypass or
386 * return ICMP and then NF_DROP (ip_vs_leave).
387 *
388 * -1 : scheduler tried to schedule but fatal error occurred, eg.
389 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
390 * failure such as missing Call-ID, ENOMEM on skb_linearize
391 * or pe_data. In this case we should return NF_DROP without
392 * any attempts to send ICMP with ip_vs_leave.
393 */
394 struct ip_vs_conn *
395 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
396 struct ip_vs_proto_data *pd, int *ignored)
397 {
398 struct ip_vs_protocol *pp = pd->pp;
399 struct ip_vs_conn *cp = NULL;
400 struct ip_vs_iphdr iph;
401 struct ip_vs_dest *dest;
402 __be16 _ports[2], *pptr;
403 unsigned int flags;
404
405 *ignored = 1;
406 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
407 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
408 if (pptr == NULL)
409 return NULL;
410
411 /*
412 * FTPDATA needs this check when using local real server.
413 * Never schedule Active FTPDATA connections from real server.
414 * For LVS-NAT they must be already created. For other methods
415 * with persistence the connection is created on SYN+ACK.
416 */
417 if (pptr[0] == FTPDATA) {
418 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
419 "Not scheduling FTPDATA");
420 return NULL;
421 }
422
423 /*
424 * Do not schedule replies from local real server.
425 */
426 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
427 (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
428 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
429 "Not scheduling reply for existing connection");
430 __ip_vs_conn_put(cp);
431 return NULL;
432 }
433
434 /*
435 * Persistent service
436 */
437 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
438 return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
439
440 *ignored = 0;
441
442 /*
443 * Non-persistent service
444 */
445 if (!svc->fwmark && pptr[1] != svc->port) {
446 if (!svc->port)
447 pr_err("Schedule: port zero only supported "
448 "in persistent services, "
449 "check your ipvs configuration\n");
450 return NULL;
451 }
452
453 dest = svc->scheduler->schedule(svc, skb);
454 if (dest == NULL) {
455 IP_VS_DBG(1, "Schedule: no dest found.\n");
456 return NULL;
457 }
458
459 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
460 && iph.protocol == IPPROTO_UDP)?
461 IP_VS_CONN_F_ONE_PACKET : 0;
462
463 /*
464 * Create a connection entry.
465 */
466 {
467 struct ip_vs_conn_param p;
468
469 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
470 &iph.saddr, pptr[0], &iph.daddr, pptr[1],
471 &p);
472 cp = ip_vs_conn_new(&p, &dest->addr,
473 dest->port ? dest->port : pptr[1],
474 flags, dest, skb->mark);
475 if (!cp) {
476 *ignored = -1;
477 return NULL;
478 }
479 }
480
481 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
482 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
483 ip_vs_fwd_tag(cp),
484 IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
485 IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
486 IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
487 cp->flags, atomic_read(&cp->refcnt));
488
489 ip_vs_conn_stats(cp, svc);
490 return cp;
491 }
492
493
494 /*
495 * Pass or drop the packet.
496 * Called by ip_vs_in, when the virtual service is available but
497 * no destination is available for a new connection.
498 */
499 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
500 struct ip_vs_proto_data *pd)
501 {
502 __be16 _ports[2], *pptr;
503 struct ip_vs_iphdr iph;
504 #ifdef CONFIG_SYSCTL
505 struct net *net;
506 struct netns_ipvs *ipvs;
507 int unicast;
508 #endif
509
510 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
511
512 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
513 if (pptr == NULL) {
514 ip_vs_service_put(svc);
515 return NF_DROP;
516 }
517
518 #ifdef CONFIG_SYSCTL
519 net = skb_net(skb);
520
521 #ifdef CONFIG_IP_VS_IPV6
522 if (svc->af == AF_INET6)
523 unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
524 else
525 #endif
526 unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
527
528 /* If this is a fwmark-based service, the cache_bypass sysctl is
529 enabled and the destination is a non-local unicast address,
530 then create a cache_bypass connection entry */
531 ipvs = net_ipvs(net);
532 if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
533 int ret, cs;
534 struct ip_vs_conn *cp;
535 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
536 iph.protocol == IPPROTO_UDP)?
537 IP_VS_CONN_F_ONE_PACKET : 0;
538 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
539
540 ip_vs_service_put(svc);
541
542 /* create a new connection entry */
543 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
544 {
545 struct ip_vs_conn_param p;
546 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
547 &iph.saddr, pptr[0],
548 &iph.daddr, pptr[1], &p);
549 cp = ip_vs_conn_new(&p, &daddr, 0,
550 IP_VS_CONN_F_BYPASS | flags,
551 NULL, skb->mark);
552 if (!cp)
553 return NF_DROP;
554 }
555
556 /* statistics */
557 ip_vs_in_stats(cp, skb);
558
559 /* set state */
560 cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
561
562 /* transmit the first SYN packet */
563 ret = cp->packet_xmit(skb, cp, pd->pp);
564 /* do not touch skb anymore */
565
566 atomic_inc(&cp->in_pkts);
567 ip_vs_conn_put(cp);
568 return ret;
569 }
570 #endif
571
572 /*
573 * When a virtual FTP service is present, packets destined
574 * for other services on the VIP may get here (except services
575 * listed in the ipvs table); pass them along, because it is
576 * not IPVS's job to decide whether to drop them.
577 */
578 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT)) {
579 ip_vs_service_put(svc);
580 return NF_ACCEPT;
581 }
582
583 ip_vs_service_put(svc);
584
585 /*
586 * Notify the client that the destination is unreachable, and
587 * release the socket buffer.
588 * Since we are at the IP layer, the TCP socket is not actually
589 * created and a TCP RST cannot be sent; instead,
590 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
591 */
592 #ifdef CONFIG_IP_VS_IPV6
593 if (svc->af == AF_INET6) {
594 if (!skb->dev) {
595 struct net *net = dev_net(skb_dst(skb)->dev);
596
597 skb->dev = net->loopback_dev;
598 }
599 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
600 } else
601 #endif
602 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
603
604 return NF_DROP;
605 }
606
607 #ifdef CONFIG_SYSCTL
608
609 static int sysctl_snat_reroute(struct sk_buff *skb)
610 {
611 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
612 return ipvs->sysctl_snat_reroute;
613 }
614
615 static int sysctl_nat_icmp_send(struct net *net)
616 {
617 struct netns_ipvs *ipvs = net_ipvs(net);
618 return ipvs->sysctl_nat_icmp_send;
619 }
620
621 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
622 {
623 return ipvs->sysctl_expire_nodest_conn;
624 }
625
626 #else
627
628 static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
629 static int sysctl_nat_icmp_send(struct net *net) { return 0; }
630 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
631
632 #endif
633
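/* Fold the checksum of the packet data starting at the given offset. */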
634 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
635 {
636 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
637 }
638
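/* Map a netfilter hook number to the matching IP defragmentation user. */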
639 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
640 {
641 if (NF_INET_LOCAL_IN == hooknum)
642 return IP_DEFRAG_VS_IN;
643 if (NF_INET_FORWARD == hooknum)
644 return IP_DEFRAG_VS_FWD;
645 return IP_DEFRAG_VS_OUT;
646 }
647
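/*
 * Reassemble IPv4 fragments and, on success, recompute the IP header
 * checksum of the rebuilt packet.
 */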
648 static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
649 {
650 int err = ip_defrag(skb, user);
651
652 if (!err)
653 ip_send_check(ip_hdr(skb));
654
655 return err;
656 }
657
658 #ifdef CONFIG_IP_VS_IPV6
659 static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
660 {
661 /* TODO IPv6: Find out what to do here for IPv6 */
662 return 0;
663 }
664 #endif
665
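/*
 * Re-route the packet after SNAT when the snat_reroute sysctl is set
 * (or, for IPv4, when the original route was local). Returns 1 if
 * re-routing fails, 0 otherwise.
 */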
666 static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
667 {
668 #ifdef CONFIG_IP_VS_IPV6
669 if (af == AF_INET6) {
670 if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
671 return 1;
672 } else
673 #endif
674 if ((sysctl_snat_reroute(skb) ||
675 skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
676 ip_route_me_harder(skb, RTN_LOCAL) != 0)
677 return 1;
678
679 return 0;
680 }
681
682 /*
683 * Packet has been made sufficiently writable in caller
684 * - inout: 1=in->out, 0=out->in
685 */
686 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
687 struct ip_vs_conn *cp, int inout)
688 {
689 struct iphdr *iph = ip_hdr(skb);
690 unsigned int icmp_offset = iph->ihl*4;
691 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
692 icmp_offset);
693 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
694
695 if (inout) {
696 iph->saddr = cp->vaddr.ip;
697 ip_send_check(iph);
698 ciph->daddr = cp->vaddr.ip;
699 ip_send_check(ciph);
700 } else {
701 iph->daddr = cp->daddr.ip;
702 ip_send_check(iph);
703 ciph->saddr = cp->daddr.ip;
704 ip_send_check(ciph);
705 }
706
707 /* the TCP/UDP/SCTP port */
708 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
709 IPPROTO_SCTP == ciph->protocol) {
710 __be16 *ports = (void *)ciph + ciph->ihl*4;
711
712 if (inout)
713 ports[1] = cp->vport;
714 else
715 ports[0] = cp->dport;
716 }
717
718 /* And finally the ICMP checksum */
719 icmph->checksum = 0;
720 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
721 skb->ip_summed = CHECKSUM_UNNECESSARY;
722
723 if (inout)
724 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
725 "Forwarding altered outgoing ICMP");
726 else
727 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
728 "Forwarding altered incoming ICMP");
729 }
730
731 #ifdef CONFIG_IP_VS_IPV6
732 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
733 struct ip_vs_conn *cp, int inout)
734 {
735 struct ipv6hdr *iph = ipv6_hdr(skb);
736 unsigned int icmp_offset = sizeof(struct ipv6hdr);
737 struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) +
738 icmp_offset);
739 struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1);
740
741 if (inout) {
742 iph->saddr = cp->vaddr.in6;
743 ciph->daddr = cp->vaddr.in6;
744 } else {
745 iph->daddr = cp->daddr.in6;
746 ciph->saddr = cp->daddr.in6;
747 }
748
749 /* the TCP/UDP/SCTP port */
750 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr ||
751 IPPROTO_SCTP == ciph->nexthdr) {
752 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
753
754 if (inout)
755 ports[1] = cp->vport;
756 else
757 ports[0] = cp->dport;
758 }
759
760 /* And finally the ICMP checksum */
761 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
762 skb->len - icmp_offset,
763 IPPROTO_ICMPV6, 0);
764 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
765 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
766 skb->ip_summed = CHECKSUM_PARTIAL;
767
768 if (inout)
769 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
770 (void *)ciph - (void *)iph,
771 "Forwarding altered outgoing ICMPv6");
772 else
773 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
774 (void *)ciph - (void *)iph,
775 "Forwarding altered incoming ICMPv6");
776 }
777 #endif
778
779 /* Handle relevant response ICMP messages - forward to the right
780 * destination host.
781 */
782 static int handle_response_icmp(int af, struct sk_buff *skb,
783 union nf_inet_addr *snet,
784 __u8 protocol, struct ip_vs_conn *cp,
785 struct ip_vs_protocol *pp,
786 unsigned int offset, unsigned int ihl)
787 {
788 unsigned int verdict = NF_DROP;
789
790 if (IP_VS_FWD_METHOD(cp) != 0) {
791 pr_err("shouldn't reach here, because the box is on the "
792 "half connection in the tun/dr module.\n");
793 }
794
795 /* Ensure the checksum is correct */
796 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
797 /* Failed checksum! */
798 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
799 IP_VS_DBG_ADDR(af, snet));
800 goto out;
801 }
802
803 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
804 IPPROTO_SCTP == protocol)
805 offset += 2 * sizeof(__u16);
806 if (!skb_make_writable(skb, offset))
807 goto out;
808
809 #ifdef CONFIG_IP_VS_IPV6
810 if (af == AF_INET6)
811 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
812 else
813 #endif
814 ip_vs_nat_icmp(skb, pp, cp, 1);
815
816 if (ip_vs_route_me_harder(af, skb))
817 goto out;
818
819 /* do the statistics and put it back */
820 ip_vs_out_stats(cp, skb);
821
822 skb->ipvs_property = 1;
823 if (!(cp->flags & IP_VS_CONN_F_NFCT))
824 ip_vs_notrack(skb);
825 else
826 ip_vs_update_conntrack(skb, cp, 0);
827 verdict = NF_ACCEPT;
828
829 out:
830 __ip_vs_conn_put(cp);
831
832 return verdict;
833 }
834
835 /*
836 * Handle ICMP messages in the inside-to-outside direction (outgoing).
837 * Find any that might be relevant, check against existing connections.
838 * Currently handles error types - unreachable, quench, ttl exceeded.
839 */
840 static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
841 unsigned int hooknum)
842 {
843 struct iphdr *iph;
844 struct icmphdr _icmph, *ic;
845 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
846 struct ip_vs_iphdr ciph;
847 struct ip_vs_conn *cp;
848 struct ip_vs_protocol *pp;
849 unsigned int offset, ihl;
850 union nf_inet_addr snet;
851
852 *related = 1;
853
854 /* reassemble IP fragments */
855 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
856 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
857 return NF_STOLEN;
858 }
859
860 iph = ip_hdr(skb);
861 offset = ihl = iph->ihl * 4;
862 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
863 if (ic == NULL)
864 return NF_DROP;
865
866 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
867 ic->type, ntohs(icmp_id(ic)),
868 &iph->saddr, &iph->daddr);
869
870 /*
871 * Work through seeing if this is for us.
872 * These checks are supposed to be in an order that means easy
873 * things are checked first to speed up processing.... however
874 * this means that some packets will manage to get a long way
875 * down this stack and then be rejected, but that's life.
876 */
877 if ((ic->type != ICMP_DEST_UNREACH) &&
878 (ic->type != ICMP_SOURCE_QUENCH) &&
879 (ic->type != ICMP_TIME_EXCEEDED)) {
880 *related = 0;
881 return NF_ACCEPT;
882 }
883
884 /* Now find the contained IP header */
885 offset += sizeof(_icmph);
886 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
887 if (cih == NULL)
888 return NF_ACCEPT; /* The packet looks wrong, ignore */
889
890 pp = ip_vs_proto_get(cih->protocol);
891 if (!pp)
892 return NF_ACCEPT;
893
894 /* Is the embedded protocol header present? */
895 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
896 pp->dont_defrag))
897 return NF_ACCEPT;
898
899 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
900 "Checking outgoing ICMP for");
901
902 offset += cih->ihl * 4;
903
904 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
905 /* The embedded headers contain source and dest in reverse order */
906 cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
907 if (!cp)
908 return NF_ACCEPT;
909
910 snet.ip = iph->saddr;
911 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
912 pp, offset, ihl);
913 }
914
915 #ifdef CONFIG_IP_VS_IPV6
916 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
917 unsigned int hooknum)
918 {
919 struct ipv6hdr *iph;
920 struct icmp6hdr _icmph, *ic;
921 struct ipv6hdr _ciph, *cih; /* The ip header contained
922 within the ICMP */
923 struct ip_vs_iphdr ciph;
924 struct ip_vs_conn *cp;
925 struct ip_vs_protocol *pp;
926 unsigned int offset;
927 union nf_inet_addr snet;
928
929 *related = 1;
930
931 /* reassemble IP fragments */
932 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
933 if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum)))
934 return NF_STOLEN;
935 }
936
937 iph = ipv6_hdr(skb);
938 offset = sizeof(struct ipv6hdr);
939 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
940 if (ic == NULL)
941 return NF_DROP;
942
943 IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %pI6->%pI6\n",
944 ic->icmp6_type, ntohs(icmpv6_id(ic)),
945 &iph->saddr, &iph->daddr);
946
947 /*
948 * Work through seeing if this is for us.
949 * These checks are supposed to be in an order that means easy
950 * things are checked first to speed up processing.... however
951 * this means that some packets will manage to get a long way
952 * down this stack and then be rejected, but that's life.
953 */
954 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
955 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
956 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
957 *related = 0;
958 return NF_ACCEPT;
959 }
960
961 /* Now find the contained IP header */
962 offset += sizeof(_icmph);
963 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
964 if (cih == NULL)
965 return NF_ACCEPT; /* The packet looks wrong, ignore */
966
967 pp = ip_vs_proto_get(cih->nexthdr);
968 if (!pp)
969 return NF_ACCEPT;
970
971 /* Is the embedded protocol header present? */
972 /* TODO: we don't support fragmentation at the moment anyway */
973 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
974 return NF_ACCEPT;
975
976 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
977 "Checking outgoing ICMPv6 for");
978
979 offset += sizeof(struct ipv6hdr);
980
981 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
982 /* The embedded headers contain source and dest in reverse order */
983 cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
984 if (!cp)
985 return NF_ACCEPT;
986
987 ipv6_addr_copy(&snet.in6, &iph->saddr);
988 return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
989 pp, offset, sizeof(struct ipv6hdr));
990 }
991 #endif
992
993 /*
994 * Check if the SCTP chunk is an ABORT chunk
995 */
996 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
997 {
998 sctp_chunkhdr_t *sch, schunk;
999 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
1000 sizeof(schunk), &schunk);
1001 if (sch == NULL)
1002 return 0;
1003 if (sch->type == SCTP_CID_ABORT)
1004 return 1;
1005 return 0;
1006 }
1007
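/* Check whether the TCP header at offset nh_len has the RST flag set. */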
1008 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1009 {
1010 struct tcphdr _tcph, *th;
1011
1012 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1013 if (th == NULL)
1014 return 0;
1015 return th->rst;
1016 }
1017
1018 /* Handle response packets: rewrite addresses and send away...
1019 */
1020 static unsigned int
1021 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1022 struct ip_vs_conn *cp, int ihl)
1023 {
1024 struct ip_vs_protocol *pp = pd->pp;
1025
1026 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
1027
1028 if (!skb_make_writable(skb, ihl))
1029 goto drop;
1030
1031 /* mangle the packet */
1032 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
1033 goto drop;
1034
1035 #ifdef CONFIG_IP_VS_IPV6
1036 if (af == AF_INET6)
1037 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1038 else
1039 #endif
1040 {
1041 ip_hdr(skb)->saddr = cp->vaddr.ip;
1042 ip_send_check(ip_hdr(skb));
1043 }
1044
1045 /*
1046 * nf_iterate does not expect change in the skb->dst->dev.
1047 * It looks like it is not fatal to enable this code for hooks
1048 * where our handlers are at the end of the chain list and
1049 * when all next handlers use skb->dst->dev and not outdev.
1050 * It will definitely route properly the inout NAT traffic
1051 * when multiple paths are used.
1052 */
1053
1054 /* For policy routing, packets originating from this
1055 * machine itself may be routed differently to packets
1056 * passing through. We want this packet to be routed as
1057 * if it came from this machine itself. So re-compute
1058 * the routing information.
1059 */
1060 if (ip_vs_route_me_harder(af, skb))
1061 goto drop;
1062
1063 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
1064
1065 ip_vs_out_stats(cp, skb);
1066 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1067 skb->ipvs_property = 1;
1068 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1069 ip_vs_notrack(skb);
1070 else
1071 ip_vs_update_conntrack(skb, cp, 0);
1072 ip_vs_conn_put(cp);
1073
1074 LeaveFunction(11);
1075 return NF_ACCEPT;
1076
1077 drop:
1078 ip_vs_conn_put(cp);
1079 kfree_skb(skb);
1080 LeaveFunction(11);
1081 return NF_STOLEN;
1082 }
1083
1084 /*
1085 * Check if outgoing packet belongs to the established ip_vs_conn.
1086 */
1087 static unsigned int
1088 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1089 {
1090 struct net *net = NULL;
1091 struct ip_vs_iphdr iph;
1092 struct ip_vs_protocol *pp;
1093 struct ip_vs_proto_data *pd;
1094 struct ip_vs_conn *cp;
1095
1096 EnterFunction(11);
1097
1098 /* Already marked as IPVS request or reply? */
1099 if (skb->ipvs_property)
1100 return NF_ACCEPT;
1101
1102 /* Bad... Do not break raw sockets */
1103 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1104 af == AF_INET)) {
1105 struct sock *sk = skb->sk;
1106 struct inet_sock *inet = inet_sk(skb->sk);
1107
1108 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1109 return NF_ACCEPT;
1110 }
1111
1112 if (unlikely(!skb_dst(skb)))
1113 return NF_ACCEPT;
1114
1115 net = skb_net(skb);
1116 if (!net_ipvs(net)->enable)
1117 return NF_ACCEPT;
1118
1119 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1120 #ifdef CONFIG_IP_VS_IPV6
1121 if (af == AF_INET6) {
1122 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1123 int related;
1124 int verdict = ip_vs_out_icmp_v6(skb, &related,
1125 hooknum);
1126
1127 if (related)
1128 return verdict;
1129 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1130 }
1131 } else
1132 #endif
1133 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1134 int related;
1135 int verdict = ip_vs_out_icmp(skb, &related, hooknum);
1136
1137 if (related)
1138 return verdict;
1139 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1140 }
1141
1142 pd = ip_vs_proto_data_get(net, iph.protocol);
1143 if (unlikely(!pd))
1144 return NF_ACCEPT;
1145 pp = pd->pp;
1146
1147 /* reassemble IP fragments */
1148 #ifdef CONFIG_IP_VS_IPV6
1149 if (af == AF_INET6) {
1150 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1151 if (ip_vs_gather_frags_v6(skb,
1152 ip_vs_defrag_user(hooknum)))
1153 return NF_STOLEN;
1154 }
1155
1156 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1157 } else
1158 #endif
1159 if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) &&
1160 !pp->dont_defrag)) {
1161 if (ip_vs_gather_frags(skb,
1162 ip_vs_defrag_user(hooknum)))
1163 return NF_STOLEN;
1164
1165 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1166 }
1167
1168 /*
1169 * Check if the packet belongs to an existing entry
1170 */
1171 cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
1172
1173 if (likely(cp))
1174 return handle_response(af, skb, pd, cp, iph.len);
1175 if (sysctl_nat_icmp_send(net) &&
1176 (pp->protocol == IPPROTO_TCP ||
1177 pp->protocol == IPPROTO_UDP ||
1178 pp->protocol == IPPROTO_SCTP)) {
1179 __be16 _ports[2], *pptr;
1180
1181 pptr = skb_header_pointer(skb, iph.len,
1182 sizeof(_ports), _ports);
1183 if (pptr == NULL)
1184 return NF_ACCEPT; /* Not for me */
1185 if (ip_vs_lookup_real_service(net, af, iph.protocol,
1186 &iph.saddr,
1187 pptr[0])) {
1188 /*
1189 * Notify the real server that there is
1190 * no existing entry, unless this is a
1191 * TCP RST or an SCTP ABORT packet.
1192 */
1193 if ((iph.protocol != IPPROTO_TCP &&
1194 iph.protocol != IPPROTO_SCTP)
1195 || ((iph.protocol == IPPROTO_TCP
1196 && !is_tcp_reset(skb, iph.len))
1197 || (iph.protocol == IPPROTO_SCTP
1198 && !is_sctp_abort(skb,
1199 iph.len)))) {
1200 #ifdef CONFIG_IP_VS_IPV6
1201 if (af == AF_INET6) {
1202 struct net *net =
1203 dev_net(skb_dst(skb)->dev);
1204
1205 if (!skb->dev)
1206 skb->dev = net->loopback_dev;
1207 icmpv6_send(skb,
1208 ICMPV6_DEST_UNREACH,
1209 ICMPV6_PORT_UNREACH,
1210 0);
1211 } else
1212 #endif
1213 icmp_send(skb,
1214 ICMP_DEST_UNREACH,
1215 ICMP_PORT_UNREACH, 0);
1216 return NF_DROP;
1217 }
1218 }
1219 }
1220 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1221 "ip_vs_out: packet continues traversal as normal");
1222 return NF_ACCEPT;
1223 }
1224
1225 /*
1226 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1227 * used only for VS/NAT.
1228 * Check if packet is reply for established ip_vs_conn.
1229 */
1230 static unsigned int
1231 ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
1232 const struct net_device *in, const struct net_device *out,
1233 int (*okfn)(struct sk_buff *))
1234 {
1235 return ip_vs_out(hooknum, skb, AF_INET);
1236 }
1237
1238 /*
1239 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1240 * Check if packet is reply for established ip_vs_conn.
1241 */
1242 static unsigned int
1243 ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
1244 const struct net_device *in, const struct net_device *out,
1245 int (*okfn)(struct sk_buff *))
1246 {
1247 unsigned int verdict;
1248
1249 /* Disable BH in LOCAL_OUT until all places are fixed */
1250 local_bh_disable();
1251 verdict = ip_vs_out(hooknum, skb, AF_INET);
1252 local_bh_enable();
1253 return verdict;
1254 }
1255
1256 #ifdef CONFIG_IP_VS_IPV6
1257
1258 /*
1259 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1260 * used only for VS/NAT.
1261 * Check if packet is reply for established ip_vs_conn.
1262 */
1263 static unsigned int
1264 ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
1265 const struct net_device *in, const struct net_device *out,
1266 int (*okfn)(struct sk_buff *))
1267 {
1268 return ip_vs_out(hooknum, skb, AF_INET6);
1269 }
1270
1271 /*
1272 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1273 * Check if packet is reply for established ip_vs_conn.
1274 */
1275 static unsigned int
1276 ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
1277 const struct net_device *in, const struct net_device *out,
1278 int (*okfn)(struct sk_buff *))
1279 {
1280 unsigned int verdict;
1281
1282 /* Disable BH in LOCAL_OUT until all places are fixed */
1283 local_bh_disable();
1284 verdict = ip_vs_out(hooknum, skb, AF_INET6);
1285 local_bh_enable();
1286 return verdict;
1287 }
1288
1289 #endif
1290
1291 /*
1292 * Handle ICMP messages in the outside-to-inside direction (incoming).
1293 * Find any that might be relevant, check against existing connections,
1294 * forward to the right destination host if relevant.
1295 * Currently handles error types - unreachable, quench, ttl exceeded.
1296 */
1297 static int
1298 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1299 {
1300 struct net *net = NULL;
1301 struct iphdr *iph;
1302 struct icmphdr _icmph, *ic;
1303 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1304 struct ip_vs_iphdr ciph;
1305 struct ip_vs_conn *cp;
1306 struct ip_vs_protocol *pp;
1307 struct ip_vs_proto_data *pd;
1308 unsigned int offset, ihl, verdict;
1309
1310 *related = 1;
1311
1312 /* reassemble IP fragments */
1313 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
1314 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
1315 return NF_STOLEN;
1316 }
1317
1318 iph = ip_hdr(skb);
1319 offset = ihl = iph->ihl * 4;
1320 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1321 if (ic == NULL)
1322 return NF_DROP;
1323
1324 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1325 ic->type, ntohs(icmp_id(ic)),
1326 &iph->saddr, &iph->daddr);
1327
1328 /*
1329 * Work through seeing if this is for us.
1330 * These checks are supposed to be in an order that means easy
1331 * things are checked first to speed up processing.... however
1332 * this means that some packets will manage to get a long way
1333 * down this stack and then be rejected, but that's life.
1334 */
1335 if ((ic->type != ICMP_DEST_UNREACH) &&
1336 (ic->type != ICMP_SOURCE_QUENCH) &&
1337 (ic->type != ICMP_TIME_EXCEEDED)) {
1338 *related = 0;
1339 return NF_ACCEPT;
1340 }
1341
1342 /* Now find the contained IP header */
1343 offset += sizeof(_icmph);
1344 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1345 if (cih == NULL)
1346 return NF_ACCEPT; /* The packet looks wrong, ignore */
1347
1348 net = skb_net(skb);
1349
1350 pd = ip_vs_proto_data_get(net, cih->protocol);
1351 if (!pd)
1352 return NF_ACCEPT;
1353 pp = pd->pp;
1354
1355 /* Is the embedded protocol header present? */
1356 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1357 pp->dont_defrag))
1358 return NF_ACCEPT;
1359
1360 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1361 "Checking incoming ICMP for");
1362
1363 offset += cih->ihl * 4;
1364
1365 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
1366 /* The embedded headers contain source and dest in reverse order */
1367 cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
1368 if (!cp)
1369 return NF_ACCEPT;
1370
1371 verdict = NF_DROP;
1372
1373 /* Ensure the checksum is correct */
1374 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1375 /* Failed checksum! */
1376 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1377 &iph->saddr);
1378 goto out;
1379 }
1380
1381 /* do the statistics and put it back */
1382 ip_vs_in_stats(cp, skb);
1383 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
1384 offset += 2 * sizeof(__u16);
1385 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset);
1386 /* LOCALNODE from FORWARD hook is not supported */
1387 if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
1388 skb_rtable(skb)->rt_flags & RTCF_LOCAL) {
1389 IP_VS_DBG(1, "%s(): "
1390 "local delivery to %pI4 but in FORWARD\n",
1391 __func__, &skb_rtable(skb)->rt_dst);
1392 verdict = NF_DROP;
1393 }
1394
1395 out:
1396 __ip_vs_conn_put(cp);
1397
1398 return verdict;
1399 }
1400
1401 #ifdef CONFIG_IP_VS_IPV6
1402 static int
1403 ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1404 {
1405 struct net *net = NULL;
1406 struct ipv6hdr *iph;
1407 struct icmp6hdr _icmph, *ic;
1408 struct ipv6hdr _ciph, *cih; /* The ip header contained
1409 within the ICMP */
1410 struct ip_vs_iphdr ciph;
1411 struct ip_vs_conn *cp;
1412 struct ip_vs_protocol *pp;
1413 struct ip_vs_proto_data *pd;
1414 unsigned int offset, verdict;
1415 struct rt6_info *rt;
1416
1417 *related = 1;
1418
1419 /* reassemble IP fragments */
1420 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1421 if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum)))
1422 return NF_STOLEN;
1423 }
1424
1425 iph = ipv6_hdr(skb);
1426 offset = sizeof(struct ipv6hdr);
1427 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1428 if (ic == NULL)
1429 return NF_DROP;
1430
1431 IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %pI6->%pI6\n",
1432 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1433 &iph->saddr, &iph->daddr);
1434
1435 /*
1436 * Work through seeing if this is for us.
1437 * These checks are supposed to be in an order that means easy
1438 * things are checked first to speed up processing.... however
1439 * this means that some packets will manage to get a long way
1440 * down this stack and then be rejected, but that's life.
1441 */
1442 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
1443 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
1444 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
1445 *related = 0;
1446 return NF_ACCEPT;
1447 }
1448
1449 /* Now find the contained IP header */
1450 offset += sizeof(_icmph);
1451 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1452 if (cih == NULL)
1453 return NF_ACCEPT; /* The packet looks wrong, ignore */
1454
1455 net = skb_net(skb);
1456 pd = ip_vs_proto_data_get(net, cih->nexthdr);
1457 if (!pd)
1458 return NF_ACCEPT;
1459 pp = pd->pp;
1460
1461 /* Is the embedded protocol header present? */
1462 /* TODO: we don't support fragmentation at the moment anyway */
1463 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
1464 return NF_ACCEPT;
1465
1466 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
1467 "Checking incoming ICMPv6 for");
1468
1469 offset += sizeof(struct ipv6hdr);
1470
1471 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
1472 /* The embedded headers contain source and dest in reverse order */
1473 cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
1474 if (!cp)
1475 return NF_ACCEPT;
1476
1477 verdict = NF_DROP;
1478
1479 /* do the statistics and put it back */
1480 ip_vs_in_stats(cp, skb);
1481 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
1482 IPPROTO_SCTP == cih->nexthdr)
1483 offset += 2 * sizeof(__u16);
1484 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
1485 /* LOCALNODE from FORWARD hook is not supported */
1486 if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
1487 (rt = (struct rt6_info *) skb_dst(skb)) &&
1488 rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK) {
1489 IP_VS_DBG(1, "%s(): "
1490 "local delivery to %pI6 but in FORWARD\n",
1491 __func__, &rt->rt6i_dst);
1492 verdict = NF_DROP;
1493 }
1494
1495 __ip_vs_conn_put(cp);
1496
1497 return verdict;
1498 }
1499 #endif
1500
1501
1502 /*
1503 * Check if it's for virtual services, look it up,
1504 * and send it on its way...
1505 */
1506 static unsigned int
1507 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1508 {
1509 struct net *net;
1510 struct ip_vs_iphdr iph;
1511 struct ip_vs_protocol *pp;
1512 struct ip_vs_proto_data *pd;
1513 struct ip_vs_conn *cp;
1514 int ret, restart, pkts;
1515 struct netns_ipvs *ipvs;
1516
1517 /* Already marked as IPVS request or reply? */
1518 if (skb->ipvs_property)
1519 return NF_ACCEPT;
1520
1521 /*
1522 * Big tappo:
1523 * - remote client: only PACKET_HOST
1524 * - route: used for struct net when skb->dev is unset
1525 */
1526 if (unlikely((skb->pkt_type != PACKET_HOST &&
1527 hooknum != NF_INET_LOCAL_OUT) ||
1528 !skb_dst(skb))) {
1529 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1530 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1531 " ignored in hook %u\n",
1532 skb->pkt_type, iph.protocol,
1533 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1534 return NF_ACCEPT;
1535 }
1536 /* ipvs enabled in this netns ? */
1537 net = skb_net(skb);
1538 if (!net_ipvs(net)->enable)
1539 return NF_ACCEPT;
1540
1541 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1542
1543 /* Bad... Do not break raw sockets */
1544 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1545 af == AF_INET)) {
1546 struct sock *sk = skb->sk;
1547 struct inet_sock *inet = inet_sk(skb->sk);
1548
1549 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1550 return NF_ACCEPT;
1551 }
1552
1553 #ifdef CONFIG_IP_VS_IPV6
1554 if (af == AF_INET6) {
1555 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1556 int related;
1557 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum);
1558
1559 if (related)
1560 return verdict;
1561 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1562 }
1563 } else
1564 #endif
1565 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1566 int related;
1567 int verdict = ip_vs_in_icmp(skb, &related, hooknum);
1568
1569 if (related)
1570 return verdict;
1571 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1572 }
1573
1574 /* Protocol supported? */
1575 pd = ip_vs_proto_data_get(net, iph.protocol);
1576 if (unlikely(!pd))
1577 return NF_ACCEPT;
1578 pp = pd->pp;
1579 /*
1580 * Check if the packet belongs to an existing connection entry
1581 */
1582 cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
1583
1584 if (unlikely(!cp)) {
1585 int v;
1586
1587 if (!pp->conn_schedule(af, skb, pd, &v, &cp))
1588 return v;
1589 }
1590
1591 if (unlikely(!cp)) {
1592 /* sorry, all this trouble for a no-hit :) */
1593 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1594 "ip_vs_in: packet continues traversal as normal");
1595 return NF_ACCEPT;
1596 }
1597
1598 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
1599 ipvs = net_ipvs(net);
1600 /* Check the server status */
1601 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1602 /* the destination server is not available */
1603
1604 if (sysctl_expire_nodest_conn(ipvs)) {
1605 /* try to expire the connection immediately */
1606 ip_vs_conn_expire_now(cp);
1607 }
1608 /* don't restart its timer, and silently
1609 drop the packet. */
1610 __ip_vs_conn_put(cp);
1611 return NF_DROP;
1612 }
1613
1614 ip_vs_in_stats(cp, skb);
1615 restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1616 if (cp->packet_xmit)
1617 ret = cp->packet_xmit(skb, cp, pp);
1618 /* do not touch skb anymore */
1619 else {
1620 IP_VS_DBG_RL("warning: packet_xmit is null");
1621 ret = NF_ACCEPT;
1622 }
1623
1624 /* Increase its packet counter and check whether it needs
1625 * to be synchronized
1626 *
1627 * Sync the connection if it is about to close, to
1628 * encourage the standby servers to update the connection's timeout
1629 *
1630 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
1631 */
1632
1633 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1634 pkts = sysctl_sync_threshold(ipvs);
1635 else
1636 pkts = atomic_add_return(1, &cp->in_pkts);
1637
1638 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
1639 cp->protocol == IPPROTO_SCTP) {
1640 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
1641 (pkts % sysctl_sync_period(ipvs)
1642 == sysctl_sync_threshold(ipvs))) ||
1643 (cp->old_state != cp->state &&
1644 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
1645 (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
1646 (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
1647 ip_vs_sync_conn(net, cp);
1648 goto out;
1649 }
1650 }
1651
1652 /* Keep this block last: TCP and others with pp->num_states <= 1 */
1653 else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
1654 (((cp->protocol != IPPROTO_TCP ||
1655 cp->state == IP_VS_TCP_S_ESTABLISHED) &&
1656 (pkts % sysctl_sync_period(ipvs)
1657 == sysctl_sync_threshold(ipvs))) ||
1658 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
1659 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
1660 (cp->state == IP_VS_TCP_S_CLOSE) ||
1661 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1662 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1663 ip_vs_sync_conn(net, cp);
1664 out:
1665 cp->old_state = cp->state;
1666
1667 ip_vs_conn_put(cp);
1668 return ret;
1669 }
1670
1671 /*
1672 * AF_INET handler in NF_INET_LOCAL_IN chain
1673 * Schedule and forward packets from remote clients
1674 */
1675 static unsigned int
1676 ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
1677 const struct net_device *in,
1678 const struct net_device *out,
1679 int (*okfn)(struct sk_buff *))
1680 {
1681 return ip_vs_in(hooknum, skb, AF_INET);
1682 }
1683
1684 /*
1685 * AF_INET handler in NF_INET_LOCAL_OUT chain
1686 * Schedule and forward packets from local clients
1687 */
1688 static unsigned int
1689 ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
1690 const struct net_device *in, const struct net_device *out,
1691 int (*okfn)(struct sk_buff *))
1692 {
1693 unsigned int verdict;
1694
1695 /* Disable BH in LOCAL_OUT until all places are fixed */
1696 local_bh_disable();
1697 verdict = ip_vs_in(hooknum, skb, AF_INET);
1698 local_bh_enable();
1699 return verdict;
1700 }
1701
1702 #ifdef CONFIG_IP_VS_IPV6
1703
1704 /*
1705 * AF_INET6 handler in NF_INET_LOCAL_IN chain
1706 * Schedule and forward packets from remote clients
1707 */
1708 static unsigned int
1709 ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
1710 const struct net_device *in,
1711 const struct net_device *out,
1712 int (*okfn)(struct sk_buff *))
1713 {
1714 return ip_vs_in(hooknum, skb, AF_INET6);
1715 }
1716
1717 /*
1718 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
1719 * Schedule and forward packets from local clients
1720 */
1721 static unsigned int
1722 ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
1723 const struct net_device *in, const struct net_device *out,
1724 int (*okfn)(struct sk_buff *))
1725 {
1726 unsigned int verdict;
1727
1728 /* Disable BH in LOCAL_OUT until all places are fixed */
1729 local_bh_disable();
1730 verdict = ip_vs_in(hooknum, skb, AF_INET6);
1731 local_bh_enable();
1732 return verdict;
1733 }
1734
1735 #endif
1736
1737
1738 /*
1739 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1740 * related packets destined for 0.0.0.0/0.
1741 * When fwmark-based virtual service is used, such as transparent
1742 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
1743 * but ICMP destined for 0.0.0.0/0 cannot be easily marked and
1744 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1745 * and send them to ip_vs_in_icmp.
1746 */
1747 static unsigned int
1748 ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
1749 const struct net_device *in, const struct net_device *out,
1750 int (*okfn)(struct sk_buff *))
1751 {
1752 int r;
1753 struct net *net;
1754
1755 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1756 return NF_ACCEPT;
1757
1758 /* ipvs enabled in this netns ? */
1759 net = skb_net(skb);
1760 if (!net_ipvs(net)->enable)
1761 return NF_ACCEPT;
1762
1763 return ip_vs_in_icmp(skb, &r, hooknum);
1764 }
1765
1766 #ifdef CONFIG_IP_VS_IPV6
1767 static unsigned int
1768 ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1769 const struct net_device *in, const struct net_device *out,
1770 int (*okfn)(struct sk_buff *))
1771 {
1772 int r;
1773 struct net *net;
1774
1775 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1776 return NF_ACCEPT;
1777
1778 /* ipvs enabled in this netns ? */
1779 net = skb_net(skb);
1780 if (!net_ipvs(net)->enable)
1781 return NF_ACCEPT;
1782
1783 return ip_vs_in_icmp_v6(skb, &r, hooknum);
1784 }
1785 #endif
1786
1787
1788 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1789 /* After packet filtering, change source only for VS/NAT */
1790 {
1791 .hook = ip_vs_reply4,
1792 .owner = THIS_MODULE,
1793 .pf = PF_INET,
1794 .hooknum = NF_INET_LOCAL_IN,
1795 .priority = 99,
1796 },
1797 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1798 * or VS/NAT(change destination), so that filtering rules can be
1799 * applied to IPVS. */
1800 {
1801 .hook = ip_vs_remote_request4,
1802 .owner = THIS_MODULE,
1803 .pf = PF_INET,
1804 .hooknum = NF_INET_LOCAL_IN,
1805 .priority = 101,
1806 },
1807 /* Before ip_vs_in, change source only for VS/NAT */
1808 {
1809 .hook = ip_vs_local_reply4,
1810 .owner = THIS_MODULE,
1811 .pf = PF_INET,
1812 .hooknum = NF_INET_LOCAL_OUT,
1813 .priority = -99,
1814 },
1815 /* After mangle, schedule and forward local requests */
1816 {
1817 .hook = ip_vs_local_request4,
1818 .owner = THIS_MODULE,
1819 .pf = PF_INET,
1820 .hooknum = NF_INET_LOCAL_OUT,
1821 .priority = -98,
1822 },
1823 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1824 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1825 {
1826 .hook = ip_vs_forward_icmp,
1827 .owner = THIS_MODULE,
1828 .pf = PF_INET,
1829 .hooknum = NF_INET_FORWARD,
1830 .priority = 99,
1831 },
1832 /* After packet filtering, change source only for VS/NAT */
1833 {
1834 .hook = ip_vs_reply4,
1835 .owner = THIS_MODULE,
1836 .pf = PF_INET,
1837 .hooknum = NF_INET_FORWARD,
1838 .priority = 100,
1839 },
1840 #ifdef CONFIG_IP_VS_IPV6
1841 /* After packet filtering, change source only for VS/NAT */
1842 {
1843 .hook = ip_vs_reply6,
1844 .owner = THIS_MODULE,
1845 .pf = PF_INET6,
1846 .hooknum = NF_INET_LOCAL_IN,
1847 .priority = 99,
1848 },
1849 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1850 * or VS/NAT(change destination), so that filtering rules can be
1851 * applied to IPVS. */
1852 {
1853 .hook = ip_vs_remote_request6,
1854 .owner = THIS_MODULE,
1855 .pf = PF_INET6,
1856 .hooknum = NF_INET_LOCAL_IN,
1857 .priority = 101,
1858 },
1859 /* Before ip_vs_in, change source only for VS/NAT */
1860 {
1861 .hook = ip_vs_local_reply6,
1862 .owner = THIS_MODULE,
1863 .pf = PF_INET,
1864 .hooknum = NF_INET_LOCAL_OUT,
1865 .priority = -99,
1866 },
1867 /* After mangle, schedule and forward local requests */
1868 {
1869 .hook = ip_vs_local_request6,
1870 .owner = THIS_MODULE,
1871 .pf = PF_INET6,
1872 .hooknum = NF_INET_LOCAL_OUT,
1873 .priority = -98,
1874 },
1875 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1876 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1877 {
1878 .hook = ip_vs_forward_icmp_v6,
1879 .owner = THIS_MODULE,
1880 .pf = PF_INET6,
1881 .hooknum = NF_INET_FORWARD,
1882 .priority = 99,
1883 },
1884 /* After packet filtering, change source only for VS/NAT */
1885 {
1886 .hook = ip_vs_reply6,
1887 .owner = THIS_MODULE,
1888 .pf = PF_INET6,
1889 .hooknum = NF_INET_FORWARD,
1890 .priority = 100,
1891 },
1892 #endif
1893 };
1894 /*
1895 * Initialize the per-netns IP Virtual Server state.
1896 */
1897 static int __net_init __ip_vs_init(struct net *net)
1898 {
1899 struct netns_ipvs *ipvs;
1900
1901 ipvs = net_generic(net, ip_vs_net_id);
1902 if (ipvs == NULL) {
1903 pr_err("%s(): no memory.\n", __func__);
1904 return -ENOMEM;
1905 }
1906 /* Hold the beast until a service is registered */
1907 ipvs->enable = 0;
1908 ipvs->net = net;
1909 /* Counters used for creating unique names */
1910 ipvs->gen = atomic_read(&ipvs_netns_cnt);
1911 atomic_inc(&ipvs_netns_cnt);
1912 net->ipvs = ipvs;
1913
1914 if (__ip_vs_estimator_init(net) < 0)
1915 goto estimator_fail;
1916
1917 if (__ip_vs_control_init(net) < 0)
1918 goto control_fail;
1919
1920 if (__ip_vs_protocol_init(net) < 0)
1921 goto protocol_fail;
1922
1923 if (__ip_vs_app_init(net) < 0)
1924 goto app_fail;
1925
1926 if (__ip_vs_conn_init(net) < 0)
1927 goto conn_fail;
1928
1929 if (__ip_vs_sync_init(net) < 0)
1930 goto sync_fail;
1931
1932 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
1933 sizeof(struct netns_ipvs), ipvs->gen);
1934 return 0;
1935 /*
1936 * Error handling
1937 */
1938
1939 sync_fail:
1940 __ip_vs_conn_cleanup(net);
1941 conn_fail:
1942 __ip_vs_app_cleanup(net);
1943 app_fail:
1944 __ip_vs_protocol_cleanup(net);
1945 protocol_fail:
1946 __ip_vs_control_cleanup(net);
1947 control_fail:
1948 __ip_vs_estimator_cleanup(net);
1949 estimator_fail:
1950 return -ENOMEM;
1951 }
1952
1953 static void __net_exit __ip_vs_cleanup(struct net *net)
1954 {
1955 __ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */
1956 __ip_vs_conn_cleanup(net);
1957 __ip_vs_app_cleanup(net);
1958 __ip_vs_protocol_cleanup(net);
1959 __ip_vs_control_cleanup(net);
1960 __ip_vs_estimator_cleanup(net);
1961 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
1962 }
1963
1964 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
1965 {
1966 EnterFunction(2);
1967 net_ipvs(net)->enable = 0; /* Disable packet reception */
1968 __ip_vs_sync_cleanup(net);
1969 LeaveFunction(2);
1970 }
1971
1972 static struct pernet_operations ipvs_core_ops = {
1973 .init = __ip_vs_init,
1974 .exit = __ip_vs_cleanup,
1975 .id = &ip_vs_net_id,
1976 .size = sizeof(struct netns_ipvs),
1977 };
1978
1979 static struct pernet_operations ipvs_core_dev_ops = {
1980 .exit = __ip_vs_dev_cleanup,
1981 };
1982
1983 /*
1984 * Initialize IP Virtual Server
1985 */
1986 static int __init ip_vs_init(void)
1987 {
1988 int ret;
1989
1990 ip_vs_estimator_init();
1991 ret = ip_vs_control_init();
1992 if (ret < 0) {
1993 pr_err("can't setup control.\n");
1994 goto cleanup_estimator;
1995 }
1996
1997 ip_vs_protocol_init();
1998
1999 ret = ip_vs_app_init();
2000 if (ret < 0) {
2001 pr_err("can't setup application helper.\n");
2002 goto cleanup_protocol;
2003 }
2004
2005 ret = ip_vs_conn_init();
2006 if (ret < 0) {
2007 pr_err("can't setup connection table.\n");
2008 goto cleanup_app;
2009 }
2010
2011 ret = ip_vs_sync_init();
2012 if (ret < 0) {
2013 pr_err("can't setup sync data.\n");
2014 goto cleanup_conn;
2015 }
2016
2017 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2018 if (ret < 0)
2019 goto cleanup_sync;
2020
2021 ret = register_pernet_device(&ipvs_core_dev_ops);
2022 if (ret < 0)
2023 goto cleanup_sub;
2024
2025 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2026 if (ret < 0) {
2027 pr_err("can't register hooks.\n");
2028 goto cleanup_dev;
2029 }
2030
2031 pr_info("ipvs loaded.\n");
2032
2033 return ret;
2034
2035 cleanup_dev:
2036 unregister_pernet_device(&ipvs_core_dev_ops);
2037 cleanup_sub:
2038 unregister_pernet_subsys(&ipvs_core_ops);
2039 cleanup_sync:
2040 ip_vs_sync_cleanup();
2041 cleanup_conn:
2042 ip_vs_conn_cleanup();
2043 cleanup_app:
2044 ip_vs_app_cleanup();
2045 cleanup_protocol:
2046 ip_vs_protocol_cleanup();
2047 ip_vs_control_cleanup();
2048 cleanup_estimator:
2049 ip_vs_estimator_cleanup();
2050 return ret;
2051 }
2052
2053 static void __exit ip_vs_cleanup(void)
2054 {
2055 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2056 unregister_pernet_device(&ipvs_core_dev_ops);
2057 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2058 ip_vs_sync_cleanup();
2059 ip_vs_conn_cleanup();
2060 ip_vs_app_cleanup();
2061 ip_vs_protocol_cleanup();
2062 ip_vs_control_cleanup();
2063 ip_vs_estimator_cleanup();
2064 pr_info("ipvs unloaded.\n");
2065 }
2066
2067 module_init(ip_vs_init);
2068 module_exit(ip_vs_cleanup);
2069 MODULE_LICENSE("GPL");