1 /*
2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
6 * cluster of servers.
7 *
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19 * and others.
20 *
21 * Changes:
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
24 *
25 */
26
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
45
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54
55 #include <net/ip_vs.h>
56
57
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71
72 static int ip_vs_net_id __read_mostly;
73 /* netns cnt used for uniqueness */
74 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
75
76 /* ID used in ICMP lookups */
77 #define icmp_id(icmph) (((icmph)->un).echo.id)
78 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
79
80 const char *ip_vs_proto_name(unsigned int proto)
81 {
82 static char buf[20];
83
84 switch (proto) {
85 case IPPROTO_IP:
86 return "IP";
87 case IPPROTO_UDP:
88 return "UDP";
89 case IPPROTO_TCP:
90 return "TCP";
91 case IPPROTO_SCTP:
92 return "SCTP";
93 case IPPROTO_ICMP:
94 return "ICMP";
95 #ifdef CONFIG_IP_VS_IPV6
96 case IPPROTO_ICMPV6:
97 return "ICMPv6";
98 #endif
99 default:
100 sprintf(buf, "IP_%d", proto);
101 return buf;
102 }
103 }
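/* Editorial note (illustrative): the "IP_%d" fallback above formats into a
 * shared static buffer, so the returned pointer should be consumed
 * immediately, as the debug statements in this file do, e.g.:
 *
 *	pr_debug("proto %s\n", ip_vs_proto_name(IPPROTO_GRE));  -> "IP_47"
 */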
104
105 void ip_vs_init_hash_table(struct list_head *table, int rows)
106 {
107 while (--rows >= 0)
108 INIT_LIST_HEAD(&table[rows]);
109 }
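/* Usage sketch (illustrative only; the array name and size are made up):
 * callers pass a pre-allocated array of list heads, typically a power of
 * two in size:
 *
 *	static struct list_head example_tab[256];
 *
 *	ip_vs_init_hash_table(example_tab, 256);
 */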
110
111 static inline void
112 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
113 {
114 struct ip_vs_dest *dest = cp->dest;
115 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
116
117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 struct ip_vs_cpu_stats *s;
119
120 s = this_cpu_ptr(dest->stats.cpustats);
121 s->ustats.inpkts++;
122 u64_stats_update_begin(&s->syncp);
123 s->ustats.inbytes += skb->len;
124 u64_stats_update_end(&s->syncp);
125
126 s = this_cpu_ptr(dest->svc->stats.cpustats);
127 s->ustats.inpkts++;
128 u64_stats_update_begin(&s->syncp);
129 s->ustats.inbytes += skb->len;
130 u64_stats_update_end(&s->syncp);
131
132 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
133 s->ustats.inpkts++;
134 u64_stats_update_begin(&s->syncp);
135 s->ustats.inbytes += skb->len;
136 u64_stats_update_end(&s->syncp);
137 }
138 }
139
140
141 static inline void
142 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
143 {
144 struct ip_vs_dest *dest = cp->dest;
145 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
146
147 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
148 struct ip_vs_cpu_stats *s;
149
150 s = this_cpu_ptr(dest->stats.cpustats);
151 s->ustats.outpkts++;
152 u64_stats_update_begin(&s->syncp);
153 s->ustats.outbytes += skb->len;
154 u64_stats_update_end(&s->syncp);
155
156 s = this_cpu_ptr(dest->svc->stats.cpustats);
157 s->ustats.outpkts++;
158 u64_stats_update_begin(&s->syncp);
159 s->ustats.outbytes += skb->len;
160 u64_stats_update_end(&s->syncp);
161
162 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
163 s->ustats.outpkts++;
164 u64_stats_update_begin(&s->syncp);
165 s->ustats.outbytes += skb->len;
166 u64_stats_update_end(&s->syncp);
167 }
168 }
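/* Reader-side sketch (editorial illustration; the real readers live in the
 * stats/estimator code, not in this file): every
 * u64_stats_update_begin()/_end() pair above is matched by a fetch/retry
 * loop when the per-cpu counters are summed, roughly:
 *
 *	unsigned int start;
 *	__u64 inbytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&s->syncp);
 *		inbytes = s->ustats.inbytes;
 *	} while (u64_stats_fetch_retry(&s->syncp, start));
 */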
169
170
171 static inline void
172 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
173 {
174 struct netns_ipvs *ipvs = net_ipvs(svc->net);
175 struct ip_vs_cpu_stats *s;
176
177 s = this_cpu_ptr(cp->dest->stats.cpustats);
178 s->ustats.conns++;
179
180 s = this_cpu_ptr(svc->stats.cpustats);
181 s->ustats.conns++;
182
183 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
184 s->ustats.conns++;
185 }
186
187
188 static inline void
189 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
190 const struct sk_buff *skb,
191 struct ip_vs_proto_data *pd)
192 {
193 if (likely(pd->pp->state_transition))
194 pd->pp->state_transition(cp, direction, skb, pd);
195 }
196
197 static inline int
198 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
199 struct sk_buff *skb, int protocol,
200 const union nf_inet_addr *caddr, __be16 cport,
201 const union nf_inet_addr *vaddr, __be16 vport,
202 struct ip_vs_conn_param *p)
203 {
204 ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
205 vport, p);
206 p->pe = rcu_dereference(svc->pe);
207 if (p->pe && p->pe->fill_param)
208 return p->pe->fill_param(p, skb);
209
210 return 0;
211 }
212
213 /*
214 * IPVS persistent scheduling function
215 * It creates a connection entry according to its template if one exists,
216 * or selects a server and creates a connection entry plus a template.
217 * Locking: we are svc user (svc->refcnt), so we hold all dests too
218 * Protocols supported: TCP, UDP
219 */
220 static struct ip_vs_conn *
221 ip_vs_sched_persist(struct ip_vs_service *svc,
222 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
223 int *ignored, struct ip_vs_iphdr *iph)
224 {
225 struct ip_vs_conn *cp = NULL;
226 struct ip_vs_dest *dest;
227 struct ip_vs_conn *ct;
228 __be16 dport = 0; /* destination port to forward */
229 unsigned int flags;
230 struct ip_vs_conn_param param;
231 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
232 union nf_inet_addr snet; /* source network of the client,
233 after masking */
234
235 /* Mask saddr with the netmask to adjust template granularity */
236 #ifdef CONFIG_IP_VS_IPV6
237 if (svc->af == AF_INET6)
238 ipv6_addr_prefix(&snet.in6, &iph->saddr.in6,
239 (__force __u32) svc->netmask);
240 else
241 #endif
242 snet.ip = iph->saddr.ip & svc->netmask;
243
244 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
245 "mnet %s\n",
246 IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port),
247 IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port),
248 IP_VS_DBG_ADDR(svc->af, &snet));
249
250 /*
251 * As far as we know, FTP is a very complicated network protocol, and
252 * it uses a control connection and separate data connections. For active
253 * FTP, the FTP server initiates the data connection to the client, usually
254 * from source port 20. For passive FTP, the FTP server tells the client
255 * which port it passively listens on, and the client opens the data
256 * connection. In the tunneling or direct routing mode, the load
257 * balancer only sees the client-to-server half of the connection, so
258 * the data port is unknown to it. Hence, a conn template like
259 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
260 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
261 * is created for other persistent services.
262 */
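/* Worked example (editorial, hypothetical addresses): for a persistent
 * service 10.0.0.1:80 with a /24 netmask and a client 192.168.1.7, the
 * template becomes <TCP, 192.168.1.0, 0, 10.0.0.1, 80, daddr, dport>;
 * for a persistent FTP service on 10.0.0.1:21 it becomes
 * <TCP, 192.168.1.0, 0, 10.0.0.1, 0, daddr, 0>, so data connections on
 * arbitrary ports still match the same template.
 */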
263 {
264 int protocol = iph->protocol;
265 const union nf_inet_addr *vaddr = &iph->daddr;
266 __be16 vport = 0;
267
268 if (dst_port == svc->port) {
269 /* non-FTP template:
270 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
271 * FTP template:
272 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
273 */
274 if (svc->port != FTPPORT)
275 vport = dst_port;
276 } else {
277 /* Note: persistent fwmark-based services and
278 * persistent port zero service are handled here.
279 * fwmark template:
280 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
281 * port zero template:
282 * <protocol,caddr,0,vaddr,0,daddr,0>
283 */
284 if (svc->fwmark) {
285 protocol = IPPROTO_IP;
286 vaddr = &fwmark;
287 }
288 }
289 /* return *ignored = -1 so NF_DROP can be used */
290 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
291 vaddr, vport, &param) < 0) {
292 *ignored = -1;
293 return NULL;
294 }
295 }
296
297 /* Check if a template already exists */
298 ct = ip_vs_ct_in_get(&param);
299 if (!ct || !ip_vs_check_template(ct)) {
300 struct ip_vs_scheduler *sched;
301
302 /*
303 * No template found or the dest of the connection
304 * template is not available.
305 * return *ignored=0 i.e. ICMP and NF_DROP
306 */
307 sched = rcu_dereference(svc->scheduler);
308 dest = sched->schedule(svc, skb);
309 if (!dest) {
310 IP_VS_DBG(1, "p-schedule: no dest found.\n");
311 kfree(param.pe_data);
312 *ignored = 0;
313 return NULL;
314 }
315
316 if (dst_port == svc->port && svc->port != FTPPORT)
317 dport = dest->port;
318
319 /* Create a template
320 * This adds param.pe_data to the template,
321 * and thus param.pe_data will be destroyed
322 * when the template expires */
323 ct = ip_vs_conn_new(&param, &dest->addr, dport,
324 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
325 if (ct == NULL) {
326 kfree(param.pe_data);
327 *ignored = -1;
328 return NULL;
329 }
330
331 ct->timeout = svc->timeout;
332 } else {
333 /* set destination with the found template */
334 dest = ct->dest;
335 kfree(param.pe_data);
336 }
337
338 dport = dst_port;
339 if (dport == svc->port && dest->port)
340 dport = dest->port;
341
342 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
343 && iph->protocol == IPPROTO_UDP) ?
344 IP_VS_CONN_F_ONE_PACKET : 0;
345
346 /*
347 * Create a new connection according to the template
348 */
349 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
350 src_port, &iph->daddr, dst_port, &param);
351
352 cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
353 if (cp == NULL) {
354 ip_vs_conn_put(ct);
355 *ignored = -1;
356 return NULL;
357 }
358
359 /*
360 * Add its control
361 */
362 ip_vs_control_add(cp, ct);
363 ip_vs_conn_put(ct);
364
365 ip_vs_conn_stats(cp, svc);
366 return cp;
367 }
368
369
370 /*
371 * IPVS main scheduling function
372 * It selects a server according to the virtual service, and
373 * creates a connection entry.
374 * Protocols supported: TCP, UDP
375 *
376 * Usage of *ignored
377 *
378 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
379 * svc/scheduler decides that this packet should be accepted with
380 * NF_ACCEPT because it must not be scheduled.
381 *
382 * 0 : scheduler can not find destination, so try bypass or
383 * return ICMP and then NF_DROP (ip_vs_leave).
384 *
385 * -1 : scheduler tried to schedule but fatal error occurred, eg.
386 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
387 * failure such as missing Call-ID, ENOMEM on skb_linearize
388 * or pe_data. In this case we should return NF_DROP without
389 * any attempts to send ICMP with ip_vs_leave.
390 */
391 struct ip_vs_conn *
392 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
393 struct ip_vs_proto_data *pd, int *ignored,
394 struct ip_vs_iphdr *iph)
395 {
396 struct ip_vs_protocol *pp = pd->pp;
397 struct ip_vs_conn *cp = NULL;
398 struct ip_vs_scheduler *sched;
399 struct ip_vs_dest *dest;
400 __be16 _ports[2], *pptr;
401 unsigned int flags;
402
403 *ignored = 1;
404 /*
405 * For IPv6 fragments, only the first fragment gets here.
406 */
407 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
408 if (pptr == NULL)
409 return NULL;
410
411 /*
412 * FTPDATA needs this check when using local real server.
413 * Never schedule Active FTPDATA connections from real server.
414 * For LVS-NAT they must be already created. For other methods
415 * with persistence the connection is created on SYN+ACK.
416 */
417 if (pptr[0] == FTPDATA) {
418 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
419 "Not scheduling FTPDATA");
420 return NULL;
421 }
422
423 /*
424 * Do not schedule replies from local real server.
425 */
426 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
427 (cp = pp->conn_in_get(svc->af, skb, iph, 1))) {
428 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
429 "Not scheduling reply for existing connection");
430 __ip_vs_conn_put(cp);
431 return NULL;
432 }
433
434 /*
435 * Persistent service
436 */
437 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
438 return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored,
439 iph);
440
441 *ignored = 0;
442
443 /*
444 * Non-persistent service
445 */
446 if (!svc->fwmark && pptr[1] != svc->port) {
447 if (!svc->port)
448 pr_err("Schedule: port zero only supported "
449 "in persistent services, "
450 "check your ipvs configuration\n");
451 return NULL;
452 }
453
454 sched = rcu_dereference(svc->scheduler);
455 dest = sched->schedule(svc, skb);
456 if (dest == NULL) {
457 IP_VS_DBG(1, "Schedule: no dest found.\n");
458 return NULL;
459 }
460
461 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
462 && iph->protocol == IPPROTO_UDP) ?
463 IP_VS_CONN_F_ONE_PACKET : 0;
464
465 /*
466 * Create a connection entry.
467 */
468 {
469 struct ip_vs_conn_param p;
470
471 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
472 &iph->saddr, pptr[0], &iph->daddr,
473 pptr[1], &p);
474 cp = ip_vs_conn_new(&p, &dest->addr,
475 dest->port ? dest->port : pptr[1],
476 flags, dest, skb->mark);
477 if (!cp) {
478 *ignored = -1;
479 return NULL;
480 }
481 }
482
483 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
484 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
485 ip_vs_fwd_tag(cp),
486 IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
487 IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
488 IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
489 cp->flags, atomic_read(&cp->refcnt));
490
491 ip_vs_conn_stats(cp, svc);
492 return cp;
493 }
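/* Caller-side sketch (editorial; roughly how a protocol handler's
 * conn_schedule method consumes the tri-state *ignored documented above,
 * not a verbatim copy of that code):
 *
 *	cp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
 *	if (!cp && ignored <= 0) {
 *		if (!ignored)
 *			*verdict = ip_vs_leave(svc, skb, pd, iph);
 *		else
 *			*verdict = NF_DROP;
 *		return 0;
 *	}
 *	return 1;	(i.e. NF_ACCEPT, also when *ignored == 1)
 */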
494
495
496 /*
497 * Pass or drop the packet.
498 * Called by ip_vs_in, when the virtual service is available but
499 * no destination is available for a new connection.
500 */
501 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
502 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
503 {
504 __be16 _ports[2], *pptr;
505 #ifdef CONFIG_SYSCTL
506 struct net *net;
507 struct netns_ipvs *ipvs;
508 int unicast;
509 #endif
510
511 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
512 if (pptr == NULL) {
513 return NF_DROP;
514 }
515
516 #ifdef CONFIG_SYSCTL
517 net = skb_net(skb);
518
519 #ifdef CONFIG_IP_VS_IPV6
520 if (svc->af == AF_INET6)
521 unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST;
522 else
523 #endif
524 unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST);
525
526 /* if it is a fwmark-based service, the cache_bypass sysctl is on
527 and the destination is a non-local unicast address, then create
528 a cache_bypass connection entry */
529 ipvs = net_ipvs(net);
530 if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
531 int ret;
532 struct ip_vs_conn *cp;
533 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
534 iph->protocol == IPPROTO_UDP) ?
535 IP_VS_CONN_F_ONE_PACKET : 0;
536 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
537
538 /* create a new connection entry */
539 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
540 {
541 struct ip_vs_conn_param p;
542 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
543 &iph->saddr, pptr[0],
544 &iph->daddr, pptr[1], &p);
545 cp = ip_vs_conn_new(&p, &daddr, 0,
546 IP_VS_CONN_F_BYPASS | flags,
547 NULL, skb->mark);
548 if (!cp)
549 return NF_DROP;
550 }
551
552 /* statistics */
553 ip_vs_in_stats(cp, skb);
554
555 /* set state */
556 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
557
558 /* transmit the first SYN packet */
559 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
560 /* do not touch skb anymore */
561
562 atomic_inc(&cp->in_pkts);
563 ip_vs_conn_put(cp);
564 return ret;
565 }
566 #endif
567
568 /*
569 * When a virtual FTP service is present, packets destined
570 * for other services on the VIP may get here (except services
571 * listed in the ipvs table); pass the packets through, because it
572 * is not IPVS's job to decide to drop them.
573 */
574 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT))
575 return NF_ACCEPT;
576
577 /*
578 * Notify the client that the destination is unreachable, and
579 * release the socket buffer.
580 * Since we are at the IP layer, no TCP socket has actually been
581 * created, so a TCP RST packet cannot be sent; instead,
582 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
583 */
584 #ifdef CONFIG_IP_VS_IPV6
585 if (svc->af == AF_INET6) {
586 if (!skb->dev) {
587 struct net *net_ = dev_net(skb_dst(skb)->dev);
588
589 skb->dev = net_->loopback_dev;
590 }
591 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
592 } else
593 #endif
594 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
595
596 return NF_DROP;
597 }
598
599 #ifdef CONFIG_SYSCTL
600
601 static int sysctl_snat_reroute(struct sk_buff *skb)
602 {
603 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
604 return ipvs->sysctl_snat_reroute;
605 }
606
607 static int sysctl_nat_icmp_send(struct net *net)
608 {
609 struct netns_ipvs *ipvs = net_ipvs(net);
610 return ipvs->sysctl_nat_icmp_send;
611 }
612
613 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
614 {
615 return ipvs->sysctl_expire_nodest_conn;
616 }
617
618 #else
619
620 static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
621 static int sysctl_nat_icmp_send(struct net *net) { return 0; }
622 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
623
624 #endif
625
626 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
627 {
628 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
629 }
630
631 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
632 {
633 if (NF_INET_LOCAL_IN == hooknum)
634 return IP_DEFRAG_VS_IN;
635 if (NF_INET_FORWARD == hooknum)
636 return IP_DEFRAG_VS_FWD;
637 return IP_DEFRAG_VS_OUT;
638 }
639
640 static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
641 {
642 int err;
643
644 local_bh_disable();
645 err = ip_defrag(skb, user);
646 local_bh_enable();
647 if (!err)
648 ip_send_check(ip_hdr(skb));
649
650 return err;
651 }
652
653 static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
654 {
655 #ifdef CONFIG_IP_VS_IPV6
656 if (af == AF_INET6) {
657 if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
658 return 1;
659 } else
660 #endif
661 if ((sysctl_snat_reroute(skb) ||
662 skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
663 ip_route_me_harder(skb, RTN_LOCAL) != 0)
664 return 1;
665
666 return 0;
667 }
668
669 /*
670 * Packet has been made sufficiently writable in caller
671 * - inout: 1=in->out, 0=out->in
672 */
673 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
674 struct ip_vs_conn *cp, int inout)
675 {
676 struct iphdr *iph = ip_hdr(skb);
677 unsigned int icmp_offset = iph->ihl*4;
678 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
679 icmp_offset);
680 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
681
682 if (inout) {
683 iph->saddr = cp->vaddr.ip;
684 ip_send_check(iph);
685 ciph->daddr = cp->vaddr.ip;
686 ip_send_check(ciph);
687 } else {
688 iph->daddr = cp->daddr.ip;
689 ip_send_check(iph);
690 ciph->saddr = cp->daddr.ip;
691 ip_send_check(ciph);
692 }
693
694 /* the TCP/UDP/SCTP port */
695 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
696 IPPROTO_SCTP == ciph->protocol) {
697 __be16 *ports = (void *)ciph + ciph->ihl*4;
698
699 if (inout)
700 ports[1] = cp->vport;
701 else
702 ports[0] = cp->dport;
703 }
704
705 /* And finally the ICMP checksum */
706 icmph->checksum = 0;
707 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
708 skb->ip_summed = CHECKSUM_UNNECESSARY;
709
710 if (inout)
711 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
712 "Forwarding altered outgoing ICMP");
713 else
714 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
715 "Forwarding altered incoming ICMP");
716 }
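/* Layout handled above (editorial illustration):
 *
 *	[outer IPv4 hdr][ICMP hdr][embedded IPv4 hdr][embedded L4 ports]...
 *	^iph            ^icmph    ^ciph              ^ports
 *
 * For in->out the outer source and the embedded destination are set to
 * the VIP; for out->in the outer destination and the embedded source are
 * set to the real server, and only one embedded port changes. The IP
 * checksums are redone per header, and the ICMP checksum over the whole
 * payload is recomputed last.
 */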
717
718 #ifdef CONFIG_IP_VS_IPV6
719 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
720 struct ip_vs_conn *cp, int inout)
721 {
722 struct ipv6hdr *iph = ipv6_hdr(skb);
723 unsigned int icmp_offset = 0;
724 unsigned int offs = 0; /* header offset*/
725 int protocol;
726 struct icmp6hdr *icmph;
727 struct ipv6hdr *ciph;
728 unsigned short fragoffs;
729
730 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
731 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
732 offs = icmp_offset + sizeof(struct icmp6hdr);
733 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
734
735 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
736
737 if (inout) {
738 iph->saddr = cp->vaddr.in6;
739 ciph->daddr = cp->vaddr.in6;
740 } else {
741 iph->daddr = cp->daddr.in6;
742 ciph->saddr = cp->daddr.in6;
743 }
744
745 /* the TCP/UDP/SCTP port */
746 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
747 IPPROTO_SCTP == protocol)) {
748 __be16 *ports = (void *)(skb_network_header(skb) + offs);
749
750 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
751 ntohs(inout ? ports[1] : ports[0]),
752 ntohs(inout ? cp->vport : cp->dport));
753 if (inout)
754 ports[1] = cp->vport;
755 else
756 ports[0] = cp->dport;
757 }
758
759 /* And finally the ICMP checksum */
760 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
761 skb->len - icmp_offset,
762 IPPROTO_ICMPV6, 0);
763 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
764 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
765 skb->ip_summed = CHECKSUM_PARTIAL;
766
767 if (inout)
768 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
769 (void *)ciph - (void *)iph,
770 "Forwarding altered outgoing ICMPv6");
771 else
772 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
773 (void *)ciph - (void *)iph,
774 "Forwarding altered incoming ICMPv6");
775 }
776 #endif
777
778 /* Handle relevant response ICMP messages - forward to the right
779 * destination host.
780 */
781 static int handle_response_icmp(int af, struct sk_buff *skb,
782 union nf_inet_addr *snet,
783 __u8 protocol, struct ip_vs_conn *cp,
784 struct ip_vs_protocol *pp,
785 unsigned int offset, unsigned int ihl)
786 {
787 unsigned int verdict = NF_DROP;
788
789 if (IP_VS_FWD_METHOD(cp) != 0) {
790 pr_err("shouldn't reach here, because the box is on the "
791 "half connection in the tun/dr module.\n");
792 }
793
794 /* Ensure the checksum is correct */
795 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
796 /* Failed checksum! */
797 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
798 IP_VS_DBG_ADDR(af, snet));
799 goto out;
800 }
801
802 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
803 IPPROTO_SCTP == protocol)
804 offset += 2 * sizeof(__u16);
805 if (!skb_make_writable(skb, offset))
806 goto out;
807
808 #ifdef CONFIG_IP_VS_IPV6
809 if (af == AF_INET6)
810 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
811 else
812 #endif
813 ip_vs_nat_icmp(skb, pp, cp, 1);
814
815 if (ip_vs_route_me_harder(af, skb))
816 goto out;
817
818 /* do the statistics and put it back */
819 ip_vs_out_stats(cp, skb);
820
821 skb->ipvs_property = 1;
822 if (!(cp->flags & IP_VS_CONN_F_NFCT))
823 ip_vs_notrack(skb);
824 else
825 ip_vs_update_conntrack(skb, cp, 0);
826 verdict = NF_ACCEPT;
827
828 out:
829 __ip_vs_conn_put(cp);
830
831 return verdict;
832 }
833
834 /*
835 * Handle ICMP messages in the inside-to-outside direction (outgoing).
836 * Find any that might be relevant, check against existing connections.
837 * Currently handles error types - unreachable, quench, ttl exceeded.
838 */
839 static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
840 unsigned int hooknum)
841 {
842 struct iphdr *iph;
843 struct icmphdr _icmph, *ic;
844 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
845 struct ip_vs_iphdr ciph;
846 struct ip_vs_conn *cp;
847 struct ip_vs_protocol *pp;
848 unsigned int offset, ihl;
849 union nf_inet_addr snet;
850
851 *related = 1;
852
853 /* reassemble IP fragments */
854 if (ip_is_fragment(ip_hdr(skb))) {
855 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
856 return NF_STOLEN;
857 }
858
859 iph = ip_hdr(skb);
860 offset = ihl = iph->ihl * 4;
861 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
862 if (ic == NULL)
863 return NF_DROP;
864
865 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
866 ic->type, ntohs(icmp_id(ic)),
867 &iph->saddr, &iph->daddr);
868
869 /*
870 * Work through seeing if this is for us.
871 * These checks are supposed to be in an order that means easy
872 * things are checked first to speed up processing.... however
873 * this means that some packets will manage to get a long way
874 * down this stack and then be rejected, but that's life.
875 */
876 if ((ic->type != ICMP_DEST_UNREACH) &&
877 (ic->type != ICMP_SOURCE_QUENCH) &&
878 (ic->type != ICMP_TIME_EXCEEDED)) {
879 *related = 0;
880 return NF_ACCEPT;
881 }
882
883 /* Now find the contained IP header */
884 offset += sizeof(_icmph);
885 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
886 if (cih == NULL)
887 return NF_ACCEPT; /* The packet looks wrong, ignore */
888
889 pp = ip_vs_proto_get(cih->protocol);
890 if (!pp)
891 return NF_ACCEPT;
892
893 /* Is the embedded protocol header present? */
894 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
895 pp->dont_defrag))
896 return NF_ACCEPT;
897
898 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
899 "Checking outgoing ICMP for");
900
901 ip_vs_fill_ip4hdr(cih, &ciph);
902 ciph.len += offset;
903 /* The embedded headers contain source and dest in reverse order */
904 cp = pp->conn_out_get(AF_INET, skb, &ciph, 1);
905 if (!cp)
906 return NF_ACCEPT;
907
908 snet.ip = iph->saddr;
909 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
910 pp, ciph.len, ihl);
911 }
912
913 #ifdef CONFIG_IP_VS_IPV6
914 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
915 unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
916 {
917 struct icmp6hdr _icmph, *ic;
918 struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */
919 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
920 struct ip_vs_conn *cp;
921 struct ip_vs_protocol *pp;
922 union nf_inet_addr snet;
923 unsigned int writable;
924
925 *related = 1;
926 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
927 if (ic == NULL)
928 return NF_DROP;
929
930 /*
931 * Work through seeing if this is for us.
932 * These checks are supposed to be in an order that means easy
933 * things are checked first to speed up processing.... however
934 * this means that some packets will manage to get a long way
935 * down this stack and then be rejected, but that's life.
936 */
937 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
938 *related = 0;
939 return NF_ACCEPT;
940 }
941 /* A fragment header before the ICMP header tells us that
942 * this is not an error message, since error messages can't be fragmented.
943 */
944 if (ipvsh->flags & IP6_FH_F_FRAG)
945 return NF_DROP;
946
947 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
948 ic->icmp6_type, ntohs(icmpv6_id(ic)),
949 &ipvsh->saddr, &ipvsh->daddr);
950
951 /* Now find the contained IP header */
952 ciph.len = ipvsh->len + sizeof(_icmph);
953 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
954 if (ip6h == NULL)
955 return NF_ACCEPT; /* The packet looks wrong, ignore */
956 ciph.saddr.in6 = ip6h->saddr; /* conn_out_get() handles reverse order */
957 ciph.daddr.in6 = ip6h->daddr;
958 /* skip possible IPv6 exthdrs of contained IPv6 packet */
959 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
960 if (ciph.protocol < 0)
961 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
962
963 pp = ip_vs_proto_get(ciph.protocol);
964 if (!pp)
965 return NF_ACCEPT;
966
967 /* The embedded headers contain source and dest in reverse order */
968 cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1);
969 if (!cp)
970 return NF_ACCEPT;
971
972 snet.in6 = ciph.saddr.in6;
973 writable = ciph.len;
974 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
975 pp, writable, sizeof(struct ipv6hdr));
976 }
977 #endif
978
979 /*
980 * Check if the SCTP chunk is an ABORT chunk
981 */
982 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
983 {
984 sctp_chunkhdr_t *sch, schunk;
985 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
986 sizeof(schunk), &schunk);
987 if (sch == NULL)
988 return 0;
989 if (sch->type == SCTP_CID_ABORT)
990 return 1;
991 return 0;
992 }
993
994 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
995 {
996 struct tcphdr _tcph, *th;
997
998 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
999 if (th == NULL)
1000 return 0;
1001 return th->rst;
1002 }
1003
1004 static inline bool is_new_conn(const struct sk_buff *skb,
1005 struct ip_vs_iphdr *iph)
1006 {
1007 switch (iph->protocol) {
1008 case IPPROTO_TCP: {
1009 struct tcphdr _tcph, *th;
1010
1011 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1012 if (th == NULL)
1013 return false;
1014 return th->syn;
1015 }
1016 case IPPROTO_SCTP: {
1017 sctp_chunkhdr_t *sch, schunk;
1018
1019 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
1020 sizeof(schunk), &schunk);
1021 if (sch == NULL)
1022 return false;
1023 return sch->type == SCTP_CID_INIT;
1024 }
1025 default:
1026 return false;
1027 }
1028 }
1029
1030 /* Handle response packets: rewrite addresses and send away...
1031 */
1032 static unsigned int
1033 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1034 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
1035 {
1036 struct ip_vs_protocol *pp = pd->pp;
1037
1038 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
1039
1040 if (!skb_make_writable(skb, iph->len))
1041 goto drop;
1042
1043 /* mangle the packet */
1044 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1045 goto drop;
1046
1047 #ifdef CONFIG_IP_VS_IPV6
1048 if (af == AF_INET6)
1049 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1050 else
1051 #endif
1052 {
1053 ip_hdr(skb)->saddr = cp->vaddr.ip;
1054 ip_send_check(ip_hdr(skb));
1055 }
1056
1057 /*
1058 * nf_iterate does not expect change in the skb->dst->dev.
1059 * It looks like it is not fatal to enable this code for hooks
1060 * where our handlers are at the end of the chain list and
1061 * when all next handlers use skb->dst->dev and not outdev.
1062 * It will definitely route the in->out NAT traffic properly
1063 * when multiple paths are used.
1064 */
1065
1066 /* For policy routing, packets originating from this
1067 * machine itself may be routed differently to packets
1068 * passing through. We want this packet to be routed as
1069 * if it came from this machine itself. So re-compute
1070 * the routing information.
1071 */
1072 if (ip_vs_route_me_harder(af, skb))
1073 goto drop;
1074
1075 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
1076
1077 ip_vs_out_stats(cp, skb);
1078 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1079 skb->ipvs_property = 1;
1080 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1081 ip_vs_notrack(skb);
1082 else
1083 ip_vs_update_conntrack(skb, cp, 0);
1084 ip_vs_conn_put(cp);
1085
1086 LeaveFunction(11);
1087 return NF_ACCEPT;
1088
1089 drop:
1090 ip_vs_conn_put(cp);
1091 kfree_skb(skb);
1092 LeaveFunction(11);
1093 return NF_STOLEN;
1094 }
1095
1096 /*
1097 * Check if outgoing packet belongs to the established ip_vs_conn.
1098 */
1099 static unsigned int
1100 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1101 {
1102 struct net *net = NULL;
1103 struct ip_vs_iphdr iph;
1104 struct ip_vs_protocol *pp;
1105 struct ip_vs_proto_data *pd;
1106 struct ip_vs_conn *cp;
1107
1108 EnterFunction(11);
1109
1110 /* Already marked as IPVS request or reply? */
1111 if (skb->ipvs_property)
1112 return NF_ACCEPT;
1113
1114 /* Bad... Do not break raw sockets */
1115 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1116 af == AF_INET)) {
1117 struct sock *sk = skb->sk;
1118 struct inet_sock *inet = inet_sk(skb->sk);
1119
1120 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1121 return NF_ACCEPT;
1122 }
1123
1124 if (unlikely(!skb_dst(skb)))
1125 return NF_ACCEPT;
1126
1127 net = skb_net(skb);
1128 if (!net_ipvs(net)->enable)
1129 return NF_ACCEPT;
1130
1131 ip_vs_fill_iph_skb(af, skb, &iph);
1132 #ifdef CONFIG_IP_VS_IPV6
1133 if (af == AF_INET6) {
1134 if (!iph.fragoffs && skb_nfct_reasm(skb)) {
1135 struct sk_buff *reasm = skb_nfct_reasm(skb);
1136 /* Save fw mark for coming frags */
1137 reasm->ipvs_property = 1;
1138 reasm->mark = skb->mark;
1139 }
1140 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1141 int related;
1142 int verdict = ip_vs_out_icmp_v6(skb, &related,
1143 hooknum, &iph);
1144
1145 if (related)
1146 return verdict;
1147 }
1148 } else
1149 #endif
1150 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1151 int related;
1152 int verdict = ip_vs_out_icmp(skb, &related, hooknum);
1153
1154 if (related)
1155 return verdict;
1156 }
1157
1158 pd = ip_vs_proto_data_get(net, iph.protocol);
1159 if (unlikely(!pd))
1160 return NF_ACCEPT;
1161 pp = pd->pp;
1162
1163 /* reassemble IP fragments */
1164 #ifdef CONFIG_IP_VS_IPV6
1165 if (af == AF_INET)
1166 #endif
1167 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1168 if (ip_vs_gather_frags(skb,
1169 ip_vs_defrag_user(hooknum)))
1170 return NF_STOLEN;
1171
1172 ip_vs_fill_ip4hdr(skb_network_header(skb), &iph);
1173 }
1174
1175 /*
1176 * Check if the packet belongs to an existing entry
1177 */
1178 cp = pp->conn_out_get(af, skb, &iph, 0);
1179
1180 if (likely(cp))
1181 return handle_response(af, skb, pd, cp, &iph);
1182 if (sysctl_nat_icmp_send(net) &&
1183 (pp->protocol == IPPROTO_TCP ||
1184 pp->protocol == IPPROTO_UDP ||
1185 pp->protocol == IPPROTO_SCTP)) {
1186 __be16 _ports[2], *pptr;
1187
1188 pptr = frag_safe_skb_hp(skb, iph.len,
1189 sizeof(_ports), _ports, &iph);
1190 if (pptr == NULL)
1191 return NF_ACCEPT; /* Not for me */
1192 if (ip_vs_has_real_service(net, af, iph.protocol, &iph.saddr,
1193 pptr[0])) {
1194 /*
1195 * Notify the real server that there is no
1196 * existing entry, unless the packet is a
1197 * TCP RST or an SCTP ABORT.
1198 */
1199 if ((iph.protocol != IPPROTO_TCP &&
1200 iph.protocol != IPPROTO_SCTP)
1201 || ((iph.protocol == IPPROTO_TCP
1202 && !is_tcp_reset(skb, iph.len))
1203 || (iph.protocol == IPPROTO_SCTP
1204 && !is_sctp_abort(skb,
1205 iph.len)))) {
1206 #ifdef CONFIG_IP_VS_IPV6
1207 if (af == AF_INET6) {
1208 if (!skb->dev)
1209 skb->dev = net->loopback_dev;
1210 icmpv6_send(skb,
1211 ICMPV6_DEST_UNREACH,
1212 ICMPV6_PORT_UNREACH,
1213 0);
1214 } else
1215 #endif
1216 icmp_send(skb,
1217 ICMP_DEST_UNREACH,
1218 ICMP_PORT_UNREACH, 0);
1219 return NF_DROP;
1220 }
1221 }
1222 }
1223 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1224 "ip_vs_out: packet continues traversal as normal");
1225 return NF_ACCEPT;
1226 }
1227
1228 /*
1229 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1230 * used only for VS/NAT.
1231 * Check if packet is reply for established ip_vs_conn.
1232 */
1233 static unsigned int
1234 ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
1235 const struct net_device *in, const struct net_device *out,
1236 int (*okfn)(struct sk_buff *))
1237 {
1238 return ip_vs_out(hooknum, skb, AF_INET);
1239 }
1240
1241 /*
1242 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1243 * Check if packet is reply for established ip_vs_conn.
1244 */
1245 static unsigned int
1246 ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
1247 const struct net_device *in, const struct net_device *out,
1248 int (*okfn)(struct sk_buff *))
1249 {
1250 return ip_vs_out(hooknum, skb, AF_INET);
1251 }
1252
1253 #ifdef CONFIG_IP_VS_IPV6
1254
1255 /*
1256 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1257 * used only for VS/NAT.
1258 * Check if packet is reply for established ip_vs_conn.
1259 */
1260 static unsigned int
1261 ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
1262 const struct net_device *in, const struct net_device *out,
1263 int (*okfn)(struct sk_buff *))
1264 {
1265 return ip_vs_out(hooknum, skb, AF_INET6);
1266 }
1267
1268 /*
1269 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1270 * Check if packet is reply for established ip_vs_conn.
1271 */
1272 static unsigned int
1273 ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
1274 const struct net_device *in, const struct net_device *out,
1275 int (*okfn)(struct sk_buff *))
1276 {
1277 return ip_vs_out(hooknum, skb, AF_INET6);
1278 }
1279
1280 #endif
1281
1282 /*
1283 * Handle ICMP messages in the outside-to-inside direction (incoming).
1284 * Find any that might be relevant, check against existing connections,
1285 * forward to the right destination host if relevant.
1286 * Currently handles error types - unreachable, quench, ttl exceeded.
1287 */
1288 static int
1289 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1290 {
1291 struct net *net = NULL;
1292 struct iphdr *iph;
1293 struct icmphdr _icmph, *ic;
1294 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1295 struct ip_vs_iphdr ciph;
1296 struct ip_vs_conn *cp;
1297 struct ip_vs_protocol *pp;
1298 struct ip_vs_proto_data *pd;
1299 unsigned int offset, offset2, ihl, verdict;
1300 bool ipip;
1301
1302 *related = 1;
1303
1304 /* reassemble IP fragments */
1305 if (ip_is_fragment(ip_hdr(skb))) {
1306 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
1307 return NF_STOLEN;
1308 }
1309
1310 iph = ip_hdr(skb);
1311 offset = ihl = iph->ihl * 4;
1312 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1313 if (ic == NULL)
1314 return NF_DROP;
1315
1316 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1317 ic->type, ntohs(icmp_id(ic)),
1318 &iph->saddr, &iph->daddr);
1319
1320 /*
1321 * Work through seeing if this is for us.
1322 * These checks are supposed to be in an order that means easy
1323 * things are checked first to speed up processing.... however
1324 * this means that some packets will manage to get a long way
1325 * down this stack and then be rejected, but that's life.
1326 */
1327 if ((ic->type != ICMP_DEST_UNREACH) &&
1328 (ic->type != ICMP_SOURCE_QUENCH) &&
1329 (ic->type != ICMP_TIME_EXCEEDED)) {
1330 *related = 0;
1331 return NF_ACCEPT;
1332 }
1333
1334 /* Now find the contained IP header */
1335 offset += sizeof(_icmph);
1336 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1337 if (cih == NULL)
1338 return NF_ACCEPT; /* The packet looks wrong, ignore */
1339
1340 net = skb_net(skb);
1341
1342 /* Special case for errors for IPIP packets */
1343 ipip = false;
1344 if (cih->protocol == IPPROTO_IPIP) {
1345 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1346 return NF_ACCEPT;
1347 /* Error for our IPIP must arrive at LOCAL_IN */
1348 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1349 return NF_ACCEPT;
1350 offset += cih->ihl * 4;
1351 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1352 if (cih == NULL)
1353 return NF_ACCEPT; /* The packet looks wrong, ignore */
1354 ipip = true;
1355 }
1356
1357 pd = ip_vs_proto_data_get(net, cih->protocol);
1358 if (!pd)
1359 return NF_ACCEPT;
1360 pp = pd->pp;
1361
1362 /* Is the embedded protocol header present? */
1363 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1364 pp->dont_defrag))
1365 return NF_ACCEPT;
1366
1367 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1368 "Checking incoming ICMP for");
1369
1370 offset2 = offset;
1371 ip_vs_fill_ip4hdr(cih, &ciph);
1372 ciph.len += offset;
1373 offset = ciph.len;
1374 /* The embedded headers contain source and dest in reverse order.
1375 * For IPIP this is error for request, not for reply.
1376 */
1377 cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1);
1378 if (!cp)
1379 return NF_ACCEPT;
1380
1381 verdict = NF_DROP;
1382
1383 /* Ensure the checksum is correct */
1384 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1385 /* Failed checksum! */
1386 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1387 &iph->saddr);
1388 goto out;
1389 }
1390
1391 if (ipip) {
1392 __be32 info = ic->un.gateway;
1393
1394 /* Update the MTU */
1395 if (ic->type == ICMP_DEST_UNREACH &&
1396 ic->code == ICMP_FRAG_NEEDED) {
1397 struct ip_vs_dest *dest = cp->dest;
1398 u32 mtu = ntohs(ic->un.frag.mtu);
1399
1400 /* Strip outer IP and ICMP, go to IPIP header */
1401 __skb_pull(skb, ihl + sizeof(_icmph));
1402 offset2 -= ihl + sizeof(_icmph);
1403 skb_reset_network_header(skb);
1404 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1405 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1406 ipv4_update_pmtu(skb, dev_net(skb->dev),
1407 mtu, 0, 0, 0, 0);
1408 /* Client uses PMTUD? */
1409 if (!(cih->frag_off & htons(IP_DF)))
1410 goto ignore_ipip;
1411 /* Prefer the resulting PMTU */
1412 if (dest) {
1413 struct ip_vs_dest_dst *dest_dst;
1414
1415 rcu_read_lock();
1416 dest_dst = rcu_dereference(dest->dest_dst);
1417 if (dest_dst)
1418 mtu = dst_mtu(dest_dst->dst_cache);
1419 rcu_read_unlock();
1420 }
1421 if (mtu > 68 + sizeof(struct iphdr))
1422 mtu -= sizeof(struct iphdr);
1423 info = htonl(mtu);
1424 }
1425 /* Strip outer IP, ICMP and IPIP, go to IP header of
1426 * original request.
1427 */
1428 __skb_pull(skb, offset2);
1429 skb_reset_network_header(skb);
1430 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1431 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1432 ic->type, ic->code, ntohl(info));
1433 icmp_send(skb, ic->type, ic->code, info);
1434 /* ICMP can be shorter, but account it anyway */
1435 ip_vs_out_stats(cp, skb);
1436
1437 ignore_ipip:
1438 consume_skb(skb);
1439 verdict = NF_STOLEN;
1440 goto out;
1441 }
1442
1443 /* do the statistics and put it back */
1444 ip_vs_in_stats(cp, skb);
1445 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1446 IPPROTO_SCTP == cih->protocol)
1447 offset += 2 * sizeof(__u16);
1448 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1449
1450 out:
1451 __ip_vs_conn_put(cp);
1452
1453 return verdict;
1454 }
1455
1456 #ifdef CONFIG_IP_VS_IPV6
1457 static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related,
1458 unsigned int hooknum, struct ip_vs_iphdr *iph)
1459 {
1460 struct net *net = NULL;
1461 struct ipv6hdr _ip6h, *ip6h;
1462 struct icmp6hdr _icmph, *ic;
1463 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1464 struct ip_vs_conn *cp;
1465 struct ip_vs_protocol *pp;
1466 struct ip_vs_proto_data *pd;
1467 unsigned int offs_ciph, writable, verdict;
1468
1469 *related = 1;
1470
1471 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
1472 if (ic == NULL)
1473 return NF_DROP;
1474
1475 /*
1476 * Work through seeing if this is for us.
1477 * These checks are supposed to be in an order that means easy
1478 * things are checked first to speed up processing.... however
1479 * this means that some packets will manage to get a long way
1480 * down this stack and then be rejected, but that's life.
1481 */
1482 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1483 *related = 0;
1484 return NF_ACCEPT;
1485 }
1486 /* A fragment header before the ICMP header tells us that
1487 * this is not an error message, since error messages can't be fragmented.
1488 */
1489 if (iph->flags & IP6_FH_F_FRAG)
1490 return NF_DROP;
1491
1492 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1493 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1494 &iph->saddr, &iph->daddr);
1495
1496 /* Now find the contained IP header */
1497 ciph.len = iph->len + sizeof(_icmph);
1498 offs_ciph = ciph.len; /* Save ip header offset */
1499 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
1500 if (ip6h == NULL)
1501 return NF_ACCEPT; /* The packet looks wrong, ignore */
1502 ciph.saddr.in6 = ip6h->saddr; /* conn_in_get() handles reverse order */
1503 ciph.daddr.in6 = ip6h->daddr;
1504 /* skip possible IPv6 exthdrs of contained IPv6 packet */
1505 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
1506 if (ciph.protocol < 0)
1507 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
1508
1509 net = skb_net(skb);
1510 pd = ip_vs_proto_data_get(net, ciph.protocol);
1511 if (!pd)
1512 return NF_ACCEPT;
1513 pp = pd->pp;
1514
1515 /* Cannot handle fragmented embedded protocol */
1516 if (ciph.fragoffs)
1517 return NF_ACCEPT;
1518
1519 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offs_ciph,
1520 "Checking incoming ICMPv6 for");
1521
1522 /* The embedded headers contain source and dest in reverse order
1523 * if not from localhost
1524 */
1525 cp = pp->conn_in_get(AF_INET6, skb, &ciph,
1526 (hooknum == NF_INET_LOCAL_OUT) ? 0 : 1);
1527
1528 if (!cp)
1529 return NF_ACCEPT;
1530 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1531 if ((hooknum == NF_INET_LOCAL_OUT) &&
1532 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1533 __ip_vs_conn_put(cp);
1534 return NF_ACCEPT;
1535 }
1536
1537 /* do the statistics and put it back */
1538 ip_vs_in_stats(cp, skb);
1539
1540 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1541 writable = ciph.len;
1542 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1543 IPPROTO_SCTP == ciph.protocol)
1544 writable += 2 * sizeof(__u16); /* Also mangle ports */
1545
1546 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph);
1547
1548 __ip_vs_conn_put(cp);
1549
1550 return verdict;
1551 }
1552 #endif
1553
1554
1555 /*
1556 * Check if it's for virtual services, look it up,
1557 * and send it on its way...
1558 */
1559 static unsigned int
1560 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1561 {
1562 struct net *net;
1563 struct ip_vs_iphdr iph;
1564 struct ip_vs_protocol *pp;
1565 struct ip_vs_proto_data *pd;
1566 struct ip_vs_conn *cp;
1567 int ret, pkts;
1568 struct netns_ipvs *ipvs;
1569
1570 /* Already marked as IPVS request or reply? */
1571 if (skb->ipvs_property)
1572 return NF_ACCEPT;
1573
1574 /*
1575 * Big tappo:
1576 * - remote client: only PACKET_HOST
1577 * - route: used for struct net when skb->dev is unset
1578 */
1579 if (unlikely((skb->pkt_type != PACKET_HOST &&
1580 hooknum != NF_INET_LOCAL_OUT) ||
1581 !skb_dst(skb))) {
1582 ip_vs_fill_iph_skb(af, skb, &iph);
1583 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1584 " ignored in hook %u\n",
1585 skb->pkt_type, iph.protocol,
1586 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1587 return NF_ACCEPT;
1588 }
1589 /* ipvs enabled in this netns ? */
1590 net = skb_net(skb);
1591 ipvs = net_ipvs(net);
1592 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1593 return NF_ACCEPT;
1594
1595 ip_vs_fill_iph_skb(af, skb, &iph);
1596
1597 /* Bad... Do not break raw sockets */
1598 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1599 af == AF_INET)) {
1600 struct sock *sk = skb->sk;
1601 struct inet_sock *inet = inet_sk(skb->sk);
1602
1603 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1604 return NF_ACCEPT;
1605 }
1606
1607 #ifdef CONFIG_IP_VS_IPV6
1608 if (af == AF_INET6) {
1609 if (!iph.fragoffs && skb_nfct_reasm(skb)) {
1610 struct sk_buff *reasm = skb_nfct_reasm(skb);
1611 /* Save fw mark for coming frags. */
1612 reasm->ipvs_property = 1;
1613 reasm->mark = skb->mark;
1614 }
1615 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1616 int related;
1617 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
1618 &iph);
1619
1620 if (related)
1621 return verdict;
1622 }
1623 } else
1624 #endif
1625 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1626 int related;
1627 int verdict = ip_vs_in_icmp(skb, &related, hooknum);
1628
1629 if (related)
1630 return verdict;
1631 }
1632
1633 /* Protocol supported? */
1634 pd = ip_vs_proto_data_get(net, iph.protocol);
1635 if (unlikely(!pd))
1636 return NF_ACCEPT;
1637 pp = pd->pp;
1638 /*
1639 * Check if the packet belongs to an existing connection entry
1640 */
1641 cp = pp->conn_in_get(af, skb, &iph, 0);
1642
1643 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
1644 unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
1645 is_new_conn(skb, &iph)) {
1646 ip_vs_conn_expire_now(cp);
1647 __ip_vs_conn_put(cp);
1648 cp = NULL;
1649 }
1650
1651 if (unlikely(!cp) && !iph.fragoffs) {
1652 /* No (second) fragments need to enter here, as the fragment zero
1653 * replayed by nf_defrag_ipv6 will already have created the cp
1654 */
1655 int v;
1656
1657 /* Schedule and create new connection entry into &cp */
1658 if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph))
1659 return v;
1660 }
1661
1662 if (unlikely(!cp)) {
1663 /* sorry, all this trouble for a no-hit :) */
1664 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1665 "ip_vs_in: packet continues traversal as normal");
1666 if (iph.fragoffs && !skb_nfct_reasm(skb)) {
1667 /* A fragment that couldn't be mapped to a conn entry
1668 * and has no pointer to a reasm skb means the
1669 * nf_defrag_ipv6 module is missing
1670 */
1671 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1672 IP_VS_DBG_PKT(7, af, pp, skb, 0, "unhandled fragment");
1673 }
1674 return NF_ACCEPT;
1675 }
1676
1677 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
1678 /* Check the server status */
1679 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1680 /* the destination server is not available */
1681
1682 if (sysctl_expire_nodest_conn(ipvs)) {
1683 /* try to expire the connection immediately */
1684 ip_vs_conn_expire_now(cp);
1685 }
1686 /* don't restart its timer, and silently
1687 drop the packet. */
1688 __ip_vs_conn_put(cp);
1689 return NF_DROP;
1690 }
1691
1692 ip_vs_in_stats(cp, skb);
1693 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1694 if (cp->packet_xmit)
1695 ret = cp->packet_xmit(skb, cp, pp, &iph);
1696 /* do not touch skb anymore */
1697 else {
1698 IP_VS_DBG_RL("warning: packet_xmit is null");
1699 ret = NF_ACCEPT;
1700 }
1701
1702 /* Increase its packet counter and check whether it needs
1703 * to be synchronized
1704 *
1705 * Sync the connection if it is about to close, to
1706 * encourage the standby servers to update the connection's timeout
1707 *
1708 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
1709 */
1710
1711 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1712 pkts = sysctl_sync_threshold(ipvs);
1713 else
1714 pkts = atomic_add_return(1, &cp->in_pkts);
1715
1716 if (ipvs->sync_state & IP_VS_STATE_MASTER)
1717 ip_vs_sync_conn(net, cp, pkts);
1718
1719 ip_vs_conn_put(cp);
1720 return ret;
1721 }
1722
1723 /*
1724 * AF_INET handler in NF_INET_LOCAL_IN chain
1725 * Schedule and forward packets from remote clients
1726 */
1727 static unsigned int
1728 ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
1729 const struct net_device *in,
1730 const struct net_device *out,
1731 int (*okfn)(struct sk_buff *))
1732 {
1733 return ip_vs_in(hooknum, skb, AF_INET);
1734 }
1735
1736 /*
1737 * AF_INET handler in NF_INET_LOCAL_OUT chain
1738 * Schedule and forward packets from local clients
1739 */
1740 static unsigned int
1741 ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
1742 const struct net_device *in, const struct net_device *out,
1743 int (*okfn)(struct sk_buff *))
1744 {
1745 return ip_vs_in(hooknum, skb, AF_INET);
1746 }
1747
1748 #ifdef CONFIG_IP_VS_IPV6
1749
1750 /*
1751 * AF_INET6 fragment handling
1752 * Copy info from first fragment, to the rest of them.
1753 */
1754 static unsigned int
1755 ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
1756 const struct net_device *in,
1757 const struct net_device *out,
1758 int (*okfn)(struct sk_buff *))
1759 {
1760 struct sk_buff *reasm = skb_nfct_reasm(skb);
1761 struct net *net;
1762
1763 /* Skip if not a "replay" from nf_ct_frag6_output or first fragment.
1764 * ipvs_property is set when checking first fragment
1765 * in ip_vs_in() and ip_vs_out().
1766 */
1767 if (reasm)
1768 IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property);
1769 if (!reasm || !reasm->ipvs_property)
1770 return NF_ACCEPT;
1771
1772 net = skb_net(skb);
1773 if (!net_ipvs(net)->enable)
1774 return NF_ACCEPT;
1775
1776 /* Copy stored fw mark, saved in ip_vs_{in,out} */
1777 skb->mark = reasm->mark;
1778
1779 return NF_ACCEPT;
1780 }
1781
1782 /*
1783 * AF_INET6 handler in NF_INET_LOCAL_IN chain
1784 * Schedule and forward packets from remote clients
1785 */
1786 static unsigned int
1787 ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
1788 const struct net_device *in,
1789 const struct net_device *out,
1790 int (*okfn)(struct sk_buff *))
1791 {
1792 return ip_vs_in(hooknum, skb, AF_INET6);
1793 }
1794
1795 /*
1796 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
1797 * Schedule and forward packets from local clients
1798 */
1799 static unsigned int
1800 ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
1801 const struct net_device *in, const struct net_device *out,
1802 int (*okfn)(struct sk_buff *))
1803 {
1804 return ip_vs_in(hooknum, skb, AF_INET6);
1805 }
1806
1807 #endif
1808
1809
1810 /*
1811 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1812 * related packets destined for 0.0.0.0/0.
1813 * When a fwmark-based virtual service is used, such as a transparent
1814 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
1815 * but ICMP destined for 0.0.0.0/0 cannot be easily marked and
1816 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1817 * and send them to ip_vs_in_icmp.
1818 */
1819 static unsigned int
1820 ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
1821 const struct net_device *in, const struct net_device *out,
1822 int (*okfn)(struct sk_buff *))
1823 {
1824 int r;
1825 struct net *net;
1826 struct netns_ipvs *ipvs;
1827
1828 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1829 return NF_ACCEPT;
1830
1831 /* ipvs enabled in this netns ? */
1832 net = skb_net(skb);
1833 ipvs = net_ipvs(net);
1834 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1835 return NF_ACCEPT;
1836
1837 return ip_vs_in_icmp(skb, &r, hooknum);
1838 }
1839
1840 #ifdef CONFIG_IP_VS_IPV6
1841 static unsigned int
1842 ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1843 const struct net_device *in, const struct net_device *out,
1844 int (*okfn)(struct sk_buff *))
1845 {
1846 int r;
1847 struct net *net;
1848 struct netns_ipvs *ipvs;
1849 struct ip_vs_iphdr iphdr;
1850
1851 ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
1852 if (iphdr.protocol != IPPROTO_ICMPV6)
1853 return NF_ACCEPT;
1854
1855 /* ipvs enabled in this netns ? */
1856 net = skb_net(skb);
1857 ipvs = net_ipvs(net);
1858 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1859 return NF_ACCEPT;
1860
1861 return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
1862 }
1863 #endif
1864
1865
1866 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1867 /* After packet filtering, change source only for VS/NAT */
1868 {
1869 .hook = ip_vs_reply4,
1870 .owner = THIS_MODULE,
1871 .pf = NFPROTO_IPV4,
1872 .hooknum = NF_INET_LOCAL_IN,
1873 .priority = NF_IP_PRI_NAT_SRC - 2,
1874 },
1875 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1876 * or VS/NAT(change destination), so that filtering rules can be
1877 * applied to IPVS. */
1878 {
1879 .hook = ip_vs_remote_request4,
1880 .owner = THIS_MODULE,
1881 .pf = NFPROTO_IPV4,
1882 .hooknum = NF_INET_LOCAL_IN,
1883 .priority = NF_IP_PRI_NAT_SRC - 1,
1884 },
1885 /* Before ip_vs_in, change source only for VS/NAT */
1886 {
1887 .hook = ip_vs_local_reply4,
1888 .owner = THIS_MODULE,
1889 .pf = NFPROTO_IPV4,
1890 .hooknum = NF_INET_LOCAL_OUT,
1891 .priority = NF_IP_PRI_NAT_DST + 1,
1892 },
1893 /* After mangle, schedule and forward local requests */
1894 {
1895 .hook = ip_vs_local_request4,
1896 .owner = THIS_MODULE,
1897 .pf = NFPROTO_IPV4,
1898 .hooknum = NF_INET_LOCAL_OUT,
1899 .priority = NF_IP_PRI_NAT_DST + 2,
1900 },
1901 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1902 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1903 {
1904 .hook = ip_vs_forward_icmp,
1905 .owner = THIS_MODULE,
1906 .pf = NFPROTO_IPV4,
1907 .hooknum = NF_INET_FORWARD,
1908 .priority = 99,
1909 },
1910 /* After packet filtering, change source only for VS/NAT */
1911 {
1912 .hook = ip_vs_reply4,
1913 .owner = THIS_MODULE,
1914 .pf = NFPROTO_IPV4,
1915 .hooknum = NF_INET_FORWARD,
1916 .priority = 100,
1917 },
1918 #ifdef CONFIG_IP_VS_IPV6
1919 /* After mangle & nat, fetch the 2nd and following fragments */
1920 {
1921 .hook = ip_vs_preroute_frag6,
1922 .owner = THIS_MODULE,
1923 .pf = NFPROTO_IPV6,
1924 .hooknum = NF_INET_PRE_ROUTING,
1925 .priority = NF_IP6_PRI_NAT_DST + 1,
1926 },
1927 /* After packet filtering, change source only for VS/NAT */
1928 {
1929 .hook = ip_vs_reply6,
1930 .owner = THIS_MODULE,
1931 .pf = NFPROTO_IPV6,
1932 .hooknum = NF_INET_LOCAL_IN,
1933 .priority = NF_IP6_PRI_NAT_SRC - 2,
1934 },
1935 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1936 * or VS/NAT(change destination), so that filtering rules can be
1937 * applied to IPVS. */
1938 {
1939 .hook = ip_vs_remote_request6,
1940 .owner = THIS_MODULE,
1941 .pf = NFPROTO_IPV6,
1942 .hooknum = NF_INET_LOCAL_IN,
1943 .priority = NF_IP6_PRI_NAT_SRC - 1,
1944 },
1945 /* Before ip_vs_in, change source only for VS/NAT */
1946 {
1947 .hook = ip_vs_local_reply6,
1948 .owner = THIS_MODULE,
1949 .pf = NFPROTO_IPV6,
1950 .hooknum = NF_INET_LOCAL_OUT,
1951 .priority = NF_IP6_PRI_NAT_DST + 1,
1952 },
1953 /* After mangle, schedule and forward local requests */
1954 {
1955 .hook = ip_vs_local_request6,
1956 .owner = THIS_MODULE,
1957 .pf = NFPROTO_IPV6,
1958 .hooknum = NF_INET_LOCAL_OUT,
1959 .priority = NF_IP6_PRI_NAT_DST + 2,
1960 },
1961 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1962 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1963 {
1964 .hook = ip_vs_forward_icmp_v6,
1965 .owner = THIS_MODULE,
1966 .pf = NFPROTO_IPV6,
1967 .hooknum = NF_INET_FORWARD,
1968 .priority = 99,
1969 },
1970 /* After packet filtering, change source only for VS/NAT */
1971 {
1972 .hook = ip_vs_reply6,
1973 .owner = THIS_MODULE,
1974 .pf = NFPROTO_IPV6,
1975 .hooknum = NF_INET_FORWARD,
1976 .priority = 100,
1977 },
1978 #endif
1979 };
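/* Editorial note (illustrative): netfilter runs hooks registered on the
 * same chain in ascending .priority order, so at LOCAL_IN for IPv4
 * ip_vs_reply4 (NF_IP_PRI_NAT_SRC - 2) runs just before
 * ip_vs_remote_request4 (NF_IP_PRI_NAT_SRC - 1), i.e. replies for
 * existing NAT connections are handled before new requests are
 * scheduled, and both run after the filter table (NF_IP_PRI_FILTER),
 * which is what the "After packet filtering" comments refer to.
 */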
1980 /*
1981 * Initialize IP Virtual Server netns mem.
1982 */
1983 static int __net_init __ip_vs_init(struct net *net)
1984 {
1985 struct netns_ipvs *ipvs;
1986
1987 ipvs = net_generic(net, ip_vs_net_id);
1988 if (ipvs == NULL)
1989 return -ENOMEM;
1990
1991 /* Hold the beast until a service is registered */
1992 ipvs->enable = 0;
1993 ipvs->net = net;
1994 /* Counters used for creating unique names */
1995 ipvs->gen = atomic_read(&ipvs_netns_cnt);
1996 atomic_inc(&ipvs_netns_cnt);
1997 net->ipvs = ipvs;
1998
1999 if (ip_vs_estimator_net_init(net) < 0)
2000 goto estimator_fail;
2001
2002 if (ip_vs_control_net_init(net) < 0)
2003 goto control_fail;
2004
2005 if (ip_vs_protocol_net_init(net) < 0)
2006 goto protocol_fail;
2007
2008 if (ip_vs_app_net_init(net) < 0)
2009 goto app_fail;
2010
2011 if (ip_vs_conn_net_init(net) < 0)
2012 goto conn_fail;
2013
2014 if (ip_vs_sync_net_init(net) < 0)
2015 goto sync_fail;
2016
2017 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
2018 sizeof(struct netns_ipvs), ipvs->gen);
2019 return 0;
2020 /*
2021 * Error handling
2022 */
2023
2024 sync_fail:
2025 ip_vs_conn_net_cleanup(net);
2026 conn_fail:
2027 ip_vs_app_net_cleanup(net);
2028 app_fail:
2029 ip_vs_protocol_net_cleanup(net);
2030 protocol_fail:
2031 ip_vs_control_net_cleanup(net);
2032 control_fail:
2033 ip_vs_estimator_net_cleanup(net);
2034 estimator_fail:
2035 net->ipvs = NULL;
2036 return -ENOMEM;
2037 }
2038
2039 static void __net_exit __ip_vs_cleanup(struct net *net)
2040 {
2041 ip_vs_service_net_cleanup(net); /* ip_vs_flush() with locks */
2042 ip_vs_conn_net_cleanup(net);
2043 ip_vs_app_net_cleanup(net);
2044 ip_vs_protocol_net_cleanup(net);
2045 ip_vs_control_net_cleanup(net);
2046 ip_vs_estimator_net_cleanup(net);
2047 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
2048 net->ipvs = NULL;
2049 }
2050
2051 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
2052 {
2053 EnterFunction(2);
2054 net_ipvs(net)->enable = 0; /* Disable packet reception */
2055 smp_wmb();
2056 ip_vs_sync_net_cleanup(net);
2057 LeaveFunction(2);
2058 }
2059
2060 static struct pernet_operations ipvs_core_ops = {
2061 .init = __ip_vs_init,
2062 .exit = __ip_vs_cleanup,
2063 .id = &ip_vs_net_id,
2064 .size = sizeof(struct netns_ipvs),
2065 };
2066
2067 static struct pernet_operations ipvs_core_dev_ops = {
2068 .exit = __ip_vs_dev_cleanup,
2069 };
2070
2071 /*
2072 * Initialize IP Virtual Server
2073 */
2074 static int __init ip_vs_init(void)
2075 {
2076 int ret;
2077
2078 ret = ip_vs_control_init();
2079 if (ret < 0) {
2080 pr_err("can't setup control.\n");
2081 goto exit;
2082 }
2083
2084 ip_vs_protocol_init();
2085
2086 ret = ip_vs_conn_init();
2087 if (ret < 0) {
2088 pr_err("can't setup connection table.\n");
2089 goto cleanup_protocol;
2090 }
2091
2092 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2093 if (ret < 0)
2094 goto cleanup_conn;
2095
2096 ret = register_pernet_device(&ipvs_core_dev_ops);
2097 if (ret < 0)
2098 goto cleanup_sub;
2099
2100 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2101 if (ret < 0) {
2102 pr_err("can't register hooks.\n");
2103 goto cleanup_dev;
2104 }
2105
2106 ret = ip_vs_register_nl_ioctl();
2107 if (ret < 0) {
2108 pr_err("can't register netlink/ioctl.\n");
2109 goto cleanup_hooks;
2110 }
2111
2112 pr_info("ipvs loaded.\n");
2113
2114 return ret;
2115
2116 cleanup_hooks:
2117 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2118 cleanup_dev:
2119 unregister_pernet_device(&ipvs_core_dev_ops);
2120 cleanup_sub:
2121 unregister_pernet_subsys(&ipvs_core_ops);
2122 cleanup_conn:
2123 ip_vs_conn_cleanup();
2124 cleanup_protocol:
2125 ip_vs_protocol_cleanup();
2126 ip_vs_control_cleanup();
2127 exit:
2128 return ret;
2129 }
2130
2131 static void __exit ip_vs_cleanup(void)
2132 {
2133 ip_vs_unregister_nl_ioctl();
2134 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2135 unregister_pernet_device(&ipvs_core_dev_ops);
2136 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2137 ip_vs_conn_cleanup();
2138 ip_vs_protocol_cleanup();
2139 ip_vs_control_cleanup();
2140 pr_info("ipvs unloaded.\n");
2141 }
2142
2143 module_init(ip_vs_init);
2144 module_exit(ip_vs_cleanup);
2145 MODULE_LICENSE("GPL");