1 /*
2 * ip_vs_xmit.c: various packet transmitters for IPVS
3 *
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Changes:
13 *
14 * Description of forwarding methods:
15 * - all transmitters are called from LOCAL_IN (remote clients) and
16 * LOCAL_OUT (local clients), but for ICMP they can be called from FORWARD
17 * - not all connections have a destination server, for example
18 * connections on the backup server when fwmark is used
19 * - bypass connections use the daddr from the packet
20 * LOCAL_OUT rules:
21 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
22 * - skb->pkt_type is not set yet
23 * - the only place where we can see skb->sk != NULL
24 */
25
26 #define KMSG_COMPONENT "IPVS"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/tcp.h> /* for tcphdr */
32 #include <net/ip.h>
33 #include <net/tcp.h> /* for csum_tcpudp_magic */
34 #include <net/udp.h>
35 #include <net/icmp.h> /* for icmp_send */
36 #include <net/route.h> /* for ip_route_output */
37 #include <net/ipv6.h>
38 #include <net/ip6_route.h>
39 #include <net/addrconf.h>
40 #include <linux/icmpv6.h>
41 #include <linux/netfilter.h>
42 #include <linux/netfilter_ipv4.h>
43
44 #include <net/ip_vs.h>
45
46 enum {
47 IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
48 IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
49 IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
50 * local
51 */
52 };
53
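/*
 * For illustration, the NAT transmitters below request all three modes,
 * e.g. in ip_vs_nat_xmit():
 *
 * rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, RT_TOS(iph->tos),
 * IP_VS_RT_MODE_LOCAL |
 * IP_VS_RT_MODE_NON_LOCAL |
 * IP_VS_RT_MODE_RDR);
 */
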
54 /*
55 * Destination cache to speed up outgoing route lookup
56 */
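/* Replace the cached route of @dest and release the old one; all
 * callers in this file hold dest->dst_lock. */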
57 static inline void
58 __ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst,
59 u32 dst_cookie)
60 {
61 struct dst_entry *old_dst;
62
63 old_dst = dest->dst_cache;
64 dest->dst_cache = dst;
65 dest->dst_rtos = rtos;
66 dest->dst_cookie = dst_cookie;
67 dst_release(old_dst);
68 }
69
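/* Return the cached route of @dest with a reference held, or NULL if
 * there is no usable cache entry (a stale entry is dropped). */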
70 static inline struct dst_entry *
71 __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
72 {
73 struct dst_entry *dst = dest->dst_cache;
74
75 if (!dst)
76 return NULL;
77 if ((dst->obsolete || rtos != dest->dst_rtos) &&
78 dst->ops->check(dst, dest->dst_cookie) == NULL) {
79 dest->dst_cache = NULL;
80 dst_release(dst);
81 return NULL;
82 }
83 dst_hold(dst);
84 return dst;
85 }
86
87 /* Get route to destination or remote server */
88 static struct rtable *
89 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
90 __be32 daddr, u32 rtos, int rt_mode)
91 {
92 struct net *net = dev_net(skb_dst(skb)->dev);
93 struct rtable *rt; /* Route to the other host */
94 struct rtable *ort; /* Original route */
95 int local;
96
97 if (dest) {
98 spin_lock(&dest->dst_lock);
99 if (!(rt = (struct rtable *)
100 __ip_vs_dst_check(dest, rtos))) {
101 rt = ip_route_output(net, dest->addr.ip, 0, rtos, 0);
102 if (IS_ERR(rt)) {
103 spin_unlock(&dest->dst_lock);
104 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
105 &dest->addr.ip);
106 return NULL;
107 }
108 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
109 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
110 &dest->addr.ip,
111 atomic_read(&rt->dst.__refcnt), rtos);
112 }
113 spin_unlock(&dest->dst_lock);
114 } else {
115 rt = ip_route_output(net, daddr, 0, rtos, 0);
116 if (IS_ERR(rt)) {
117 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
118 &daddr);
119 return NULL;
120 }
121 }
122
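/* Validate the route against rt_mode: local/non-local destination,
 * optional redirect to a local address, and never forward packets
 * with a loopback source to a non-local destination.
 */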
123 local = rt->rt_flags & RTCF_LOCAL;
124 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
125 rt_mode)) {
126 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
127 (rt->rt_flags & RTCF_LOCAL) ?
128 "local":"non-local", &rt->rt_dst);
129 ip_rt_put(rt);
130 return NULL;
131 }
132 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
133 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
134 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
135 "requires NAT method, dest: %pI4\n",
136 &ip_hdr(skb)->daddr, &rt->rt_dst);
137 ip_rt_put(rt);
138 return NULL;
139 }
140 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
141 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
142 "to non-local address, dest: %pI4\n",
143 &ip_hdr(skb)->saddr, &rt->rt_dst);
144 ip_rt_put(rt);
145 return NULL;
146 }
147
148 return rt;
149 }
150
151 /* Reroute packet to local IPv4 stack after DNAT */
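/* Returns 1 on success (skb now has a route to the local stack), 0 on failure. */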
152 static int
153 __ip_vs_reroute_locally(struct sk_buff *skb)
154 {
155 struct rtable *rt = skb_rtable(skb);
156 struct net_device *dev = rt->dst.dev;
157 struct net *net = dev_net(dev);
158 struct iphdr *iph = ip_hdr(skb);
159
160 if (rt_is_input_route(rt)) {
161 unsigned long orefdst = skb->_skb_refdst;
162
163 if (ip_route_input(skb, iph->daddr, iph->saddr,
164 iph->tos, skb->dev))
165 return 0;
166 refdst_drop(orefdst);
167 } else {
168 struct flowi4 fl4 = {
169 .daddr = iph->daddr,
170 .saddr = iph->saddr,
171 .flowi4_tos = RT_TOS(iph->tos),
172 .flowi4_mark = skb->mark,
173 };
174
175 rt = ip_route_output_key(net, &fl4);
176 if (IS_ERR(rt))
177 return 0;
178 if (!(rt->rt_flags & RTCF_LOCAL)) {
179 ip_rt_put(rt);
180 return 0;
181 }
182 /* Drop old route. */
183 skb_dst_drop(skb);
184 skb_dst_set(skb, &rt->dst);
185 }
186 return 1;
187 }
188
189 #ifdef CONFIG_IP_VS_IPV6
190
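/* Is the IPv6 route local, i.e. does it use the loopback device? */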
191 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
192 {
193 return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK;
194 }
195
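/* Look up a route to @daddr; optionally return the chosen source address
 * in @ret_saddr and, when @do_xfrm is set, pass the route through xfrm. */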
196 static struct dst_entry *
197 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
198 struct in6_addr *ret_saddr, int do_xfrm)
199 {
200 struct dst_entry *dst;
201 struct flowi6 fl6 = {
202 .daddr = *daddr,
203 };
204
205 dst = ip6_route_output(net, NULL, &fl6);
206 if (dst->error)
207 goto out_err;
208 if (!ret_saddr)
209 return dst;
210 if (ipv6_addr_any(&fl6.saddr) &&
211 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
212 &fl6.daddr, 0, &fl6.saddr) < 0)
213 goto out_err;
214 if (do_xfrm) {
215 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
216 if (IS_ERR(dst)) {
217 dst = NULL;
218 goto out_err;
219 }
220 }
221 ipv6_addr_copy(ret_saddr, &fl6.saddr);
222 return dst;
223
224 out_err:
225 dst_release(dst);
226 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
227 return NULL;
228 }
229
230 /*
231 * Get route to destination or remote server
232 * rt_mode: flags (same bits as IP_VS_RT_MODE_*), &1=Allow local dest,
233 * &2=Allow non-local dest, &4=Allow redirect from remote daddr to local
234 */
235 static struct rt6_info *
236 __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
237 struct in6_addr *daddr, struct in6_addr *ret_saddr,
238 int do_xfrm, int rt_mode)
239 {
240 struct net *net = dev_net(skb_dst(skb)->dev);
241 struct rt6_info *rt; /* Route to the other host */
242 struct rt6_info *ort; /* Original route */
243 struct dst_entry *dst;
244 int local;
245
246 if (dest) {
247 spin_lock(&dest->dst_lock);
248 rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0);
249 if (!rt) {
250 u32 cookie;
251
252 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
253 &dest->dst_saddr,
254 do_xfrm);
255 if (!dst) {
256 spin_unlock(&dest->dst_lock);
257 return NULL;
258 }
259 rt = (struct rt6_info *) dst;
260 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
261 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
262 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
263 &dest->addr.in6, &dest->dst_saddr,
264 atomic_read(&rt->dst.__refcnt));
265 }
266 if (ret_saddr)
267 ipv6_addr_copy(ret_saddr, &dest->dst_saddr);
268 spin_unlock(&dest->dst_lock);
269 } else {
270 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
271 if (!dst)
272 return NULL;
273 rt = (struct rt6_info *) dst;
274 }
275
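/* Same checks as in __ip_vs_get_out_rt(): destination type vs. rt_mode,
 * redirect to a local address and loopback source restrictions.
 */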
276 local = __ip_vs_is_local_route6(rt);
277 if (!((local ? 1 : 2) & rt_mode)) {
278 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
279 local ? "local":"non-local", daddr);
280 dst_release(&rt->dst);
281 return NULL;
282 }
283 if (local && !(rt_mode & 4) &&
284 !((ort = (struct rt6_info *) skb_dst(skb)) &&
285 __ip_vs_is_local_route6(ort))) {
286 IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
287 "requires NAT method, dest: %pI6\n",
288 &ipv6_hdr(skb)->daddr, daddr);
289 dst_release(&rt->dst);
290 return NULL;
291 }
292 if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
293 ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
294 IPV6_ADDR_LOOPBACK)) {
295 IP_VS_DBG_RL("Stopping traffic from loopback address %pI6 "
296 "to non-local address, dest: %pI6\n",
297 &ipv6_hdr(skb)->saddr, daddr);
298 dst_release(&rt->dst);
299 return NULL;
300 }
301
302 return rt;
303 }
304 #endif
305
306
307 /*
308 * Release dest->dst_cache before a dest is removed
309 */
310 void
311 ip_vs_dst_reset(struct ip_vs_dest *dest)
312 {
313 struct dst_entry *old_dst;
314
315 old_dst = dest->dst_cache;
316 dest->dst_cache = NULL;
317 dst_release(old_dst);
318 }
319
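/* Mark the skb as IPVS property, confirm conntrack for NFCT connections
 * and prepare it for forwarding; evaluates to the netfilter verdict. */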
320 #define IP_VS_XMIT_TUNNEL(skb, cp) \
321 ({ \
322 int __ret = NF_ACCEPT; \
323 \
324 (skb)->ipvs_property = 1; \
325 if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT)) \
326 __ret = ip_vs_confirm_conntrack(skb, cp); \
327 if (__ret == NF_ACCEPT) { \
328 nf_reset(skb); \
329 skb_forward_csum(skb); \
330 } \
331 __ret; \
332 })
333
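/* Transmit a NAT-ed packet: update conntrack (or mark the skb untracked),
 * accept packets for local destinations and re-inject the rest via
 * LOCAL_OUT so they are routed to the real server. */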
334 #define IP_VS_XMIT_NAT(pf, skb, cp, local) \
335 do { \
336 (skb)->ipvs_property = 1; \
337 if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT))) \
338 ip_vs_notrack(skb); \
339 else \
340 ip_vs_update_conntrack(skb, cp, 1); \
341 if (local) \
342 return NF_ACCEPT; \
343 skb_forward_csum(skb); \
344 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
345 skb_dst(skb)->dev, dst_output); \
346 } while (0)
347
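/* Transmit as-is (no NAT): mark non-NFCT connections as untracked,
 * accept packets for local destinations and re-inject the rest via
 * LOCAL_OUT. */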
348 #define IP_VS_XMIT(pf, skb, cp, local) \
349 do { \
350 (skb)->ipvs_property = 1; \
351 if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT))) \
352 ip_vs_notrack(skb); \
353 if (local) \
354 return NF_ACCEPT; \
355 skb_forward_csum(skb); \
356 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
357 skb_dst(skb)->dev, dst_output); \
358 } while (0)
359
360
361 /*
362 * NULL transmitter (do nothing except return NF_ACCEPT)
363 */
364 int
365 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
366 struct ip_vs_protocol *pp)
367 {
368 /* we do not touch skb and do not need pskb ptr */
369 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
370 }
371
372
373 /*
374 * Bypass transmitter
375 * Let packets bypass the destination when the destination is not
376 * available; it may only be used in a transparent cache cluster.
377 */
378 int
379 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
380 struct ip_vs_protocol *pp)
381 {
382 struct rtable *rt; /* Route to the other host */
383 struct iphdr *iph = ip_hdr(skb);
384 int mtu;
385
386 EnterFunction(10);
387
388 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
389 IP_VS_RT_MODE_NON_LOCAL)))
390 goto tx_error_icmp;
391
392 /* MTU checking */
393 mtu = dst_mtu(&rt->dst);
394 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
395 !skb_is_gso(skb)) {
396 ip_rt_put(rt);
397 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
398 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
399 goto tx_error;
400 }
401
402 /*
403 * Call ip_send_check because we are not sure it is called
404 * after ip_defrag. Is copy-on-write needed?
405 */
406 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
407 ip_rt_put(rt);
408 return NF_STOLEN;
409 }
410 ip_send_check(ip_hdr(skb));
411
412 /* drop old route */
413 skb_dst_drop(skb);
414 skb_dst_set(skb, &rt->dst);
415
416 /* Another hack: avoid icmp_send in ip_fragment */
417 skb->local_df = 1;
418
419 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
420
421 LeaveFunction(10);
422 return NF_STOLEN;
423
424 tx_error_icmp:
425 dst_link_failure(skb);
426 tx_error:
427 kfree_skb(skb);
428 LeaveFunction(10);
429 return NF_STOLEN;
430 }
431
432 #ifdef CONFIG_IP_VS_IPV6
433 int
434 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
435 struct ip_vs_protocol *pp)
436 {
437 struct rt6_info *rt; /* Route to the other host */
438 struct ipv6hdr *iph = ipv6_hdr(skb);
439 int mtu;
440
441 EnterFunction(10);
442
443 if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 2)))
444 goto tx_error_icmp;
445
446 /* MTU checking */
447 mtu = dst_mtu(&rt->dst);
448 if (skb->len > mtu && !skb_is_gso(skb)) {
449 if (!skb->dev) {
450 struct net *net = dev_net(skb_dst(skb)->dev);
451
452 skb->dev = net->loopback_dev;
453 }
454 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
455 dst_release(&rt->dst);
456 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
457 goto tx_error;
458 }
459
460 /*
461 * Unshare the skb before replacing its route (there is no IPv4 header
462 * checksum to fix up here). Is copy-on-write needed?
463 */
464 skb = skb_share_check(skb, GFP_ATOMIC);
465 if (unlikely(skb == NULL)) {
466 dst_release(&rt->dst);
467 return NF_STOLEN;
468 }
469
470 /* drop old route */
471 skb_dst_drop(skb);
472 skb_dst_set(skb, &rt->dst);
473
474 /* Another hack: avoid icmp_send in ip_fragment */
475 skb->local_df = 1;
476
477 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
478
479 LeaveFunction(10);
480 return NF_STOLEN;
481
482 tx_error_icmp:
483 dst_link_failure(skb);
484 tx_error:
485 kfree_skb(skb);
486 LeaveFunction(10);
487 return NF_STOLEN;
488 }
489 #endif
490
491 /*
492 * NAT transmitter (only for outside-to-inside NAT forwarding)
493 * Not used for related ICMP
494 */
495 int
496 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
497 struct ip_vs_protocol *pp)
498 {
499 struct rtable *rt; /* Route to the other host */
500 int mtu;
501 struct iphdr *iph = ip_hdr(skb);
502 int local;
503
504 EnterFunction(10);
505
506 /* check if it is a connection of no-client-port */
507 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
508 __be16 _pt, *p;
509 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
510 if (p == NULL)
511 goto tx_error;
512 ip_vs_conn_fill_cport(cp, *p);
513 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
514 }
515
516 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
517 RT_TOS(iph->tos),
518 IP_VS_RT_MODE_LOCAL |
519 IP_VS_RT_MODE_NON_LOCAL |
520 IP_VS_RT_MODE_RDR)))
521 goto tx_error_icmp;
522 local = rt->rt_flags & RTCF_LOCAL;
523 /*
524 * Avoid duplicate tuple in reply direction for NAT traffic
525 * to local address when connection is sync-ed
526 */
527 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
528 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
529 enum ip_conntrack_info ctinfo;
530 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
531
532 if (ct && !nf_ct_is_untracked(ct)) {
533 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
534 "ip_vs_nat_xmit(): "
535 "stopping DNAT to local address");
536 goto tx_error_put;
537 }
538 }
539 #endif
540
541 /* From world but DNAT to loopback address? */
542 if (local && ipv4_is_loopback(rt->rt_dst) &&
543 rt_is_input_route(skb_rtable(skb))) {
544 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
545 "stopping DNAT to loopback address");
546 goto tx_error_put;
547 }
548
549 /* MTU checking */
550 mtu = dst_mtu(&rt->dst);
551 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
552 !skb_is_gso(skb)) {
553 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
554 IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
555 "ip_vs_nat_xmit(): frag needed for");
556 goto tx_error_put;
557 }
558
559 /* copy-on-write the packet before mangling it */
560 if (!skb_make_writable(skb, sizeof(struct iphdr)))
561 goto tx_error_put;
562
563 if (skb_cow(skb, rt->dst.dev->hard_header_len))
564 goto tx_error_put;
565
566 /* mangle the packet */
567 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
568 goto tx_error_put;
569 ip_hdr(skb)->daddr = cp->daddr.ip;
570 ip_send_check(ip_hdr(skb));
571
572 if (!local) {
573 /* drop old route */
574 skb_dst_drop(skb);
575 skb_dst_set(skb, &rt->dst);
576 } else {
577 ip_rt_put(rt);
578 /*
579 * Some IPv4 replies get local address from routes,
580 * not from iph, so while we DNAT after routing
581 * we need this second input/output route.
582 */
583 if (!__ip_vs_reroute_locally(skb))
584 goto tx_error;
585 }
586
587 IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");
588
589 /* FIXME: when an application helper enlarges the packet and the length
590 is larger than the MTU of the outgoing device, there will still be
591 an MTU problem. */
592
593 /* Another hack: avoid icmp_send in ip_fragment */
594 skb->local_df = 1;
595
596 IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
597
598 LeaveFunction(10);
599 return NF_STOLEN;
600
601 tx_error_icmp:
602 dst_link_failure(skb);
603 tx_error:
604 kfree_skb(skb);
605 LeaveFunction(10);
606 return NF_STOLEN;
607 tx_error_put:
608 ip_rt_put(rt);
609 goto tx_error;
610 }
611
612 #ifdef CONFIG_IP_VS_IPV6
613 int
614 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
615 struct ip_vs_protocol *pp)
616 {
617 struct rt6_info *rt; /* Route to the other host */
618 int mtu;
619 int local;
620
621 EnterFunction(10);
622
623 /* check if it is a connection of no-client-port */
624 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
625 __be16 _pt, *p;
626 p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
627 sizeof(_pt), &_pt);
628 if (p == NULL)
629 goto tx_error;
630 ip_vs_conn_fill_cport(cp, *p);
631 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
632 }
633
634 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
635 0, 1|2|4)))
636 goto tx_error_icmp;
637 local = __ip_vs_is_local_route6(rt);
638 /*
639 * Avoid duplicate tuple in reply direction for NAT traffic
640 * to local address when connection is sync-ed
641 */
642 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
643 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
644 enum ip_conntrack_info ctinfo;
645 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
646
647 if (ct && !nf_ct_is_untracked(ct)) {
648 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
649 "ip_vs_nat_xmit_v6(): "
650 "stopping DNAT to local address");
651 goto tx_error_put;
652 }
653 }
654 #endif
655
656 /* From world but DNAT to loopback address? */
657 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
658 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
659 IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
660 "ip_vs_nat_xmit_v6(): "
661 "stopping DNAT to loopback address");
662 goto tx_error_put;
663 }
664
665 /* MTU checking */
666 mtu = dst_mtu(&rt->dst);
667 if (skb->len > mtu && !skb_is_gso(skb)) {
668 if (!skb->dev) {
669 struct net *net = dev_net(skb_dst(skb)->dev);
670
671 skb->dev = net->loopback_dev;
672 }
673 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
674 IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
675 "ip_vs_nat_xmit_v6(): frag needed for");
676 goto tx_error_put;
677 }
678
679 /* copy-on-write the packet before mangling it */
680 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
681 goto tx_error_put;
682
683 if (skb_cow(skb, rt->dst.dev->hard_header_len))
684 goto tx_error_put;
685
686 /* mangle the packet */
687 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
688 goto tx_error;
689 ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
690
691 if (!local || !skb->dev) {
692 /* drop the old route when skb is not shared */
693 skb_dst_drop(skb);
694 skb_dst_set(skb, &rt->dst);
695 } else {
696 /* destined to loopback, do we need to change route? */
697 dst_release(&rt->dst);
698 }
699
700 IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");
701
702 /* FIXME: when an application helper enlarges the packet and the length
703 is larger than the MTU of the outgoing device, there will still be
704 an MTU problem. */
705
706 /* Another hack: avoid icmp_send in ip_fragment */
707 skb->local_df = 1;
708
709 IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
710
711 LeaveFunction(10);
712 return NF_STOLEN;
713
714 tx_error_icmp:
715 dst_link_failure(skb);
716 tx_error:
717 LeaveFunction(10);
718 kfree_skb(skb);
719 return NF_STOLEN;
720 tx_error_put:
721 dst_release(&rt->dst);
722 goto tx_error;
723 }
724 #endif
725
726
727 /*
728 * IP Tunneling transmitter
729 *
730 * This function encapsulates the packet in a new IP packet whose
731 * destination is set to cp->daddr. Most of the code in this function
732 * is taken from ipip.c.
733 *
734 * It is used in VS/TUN cluster. The load balancer selects a real
735 * server from a cluster based on a scheduling algorithm,
736 * encapsulates the request packet and forwards it to the selected
737 * server. For example, all real servers are configured with
738 * "ifconfig tunl0 <Virtual IP Address> up". When the server receives
739 * the encapsulated packet, it will decapsulate the packet, process
740 * the request and return the response packets directly to the client
741 * without passing through the load balancer. This can greatly increase
742 * the scalability of the virtual server.
743 *
744 * Used for ANY protocol
745 */
746 int
747 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
748 struct ip_vs_protocol *pp)
749 {
750 struct rtable *rt; /* Route to the other host */
751 struct net_device *tdev; /* Device to other host */
752 struct iphdr *old_iph = ip_hdr(skb);
753 u8 tos = old_iph->tos;
754 __be16 df = old_iph->frag_off;
755 struct iphdr *iph; /* Our new IP header */
756 unsigned int max_headroom; /* The extra header space needed */
757 int mtu;
758 int ret;
759
760 EnterFunction(10);
761
762 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
763 RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
764 IP_VS_RT_MODE_NON_LOCAL)))
765 goto tx_error_icmp;
766 if (rt->rt_flags & RTCF_LOCAL) {
767 ip_rt_put(rt);
768 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
769 }
770
771 tdev = rt->dst.dev;
772
773 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
774 if (mtu < 68) {
775 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
776 goto tx_error_put;
777 }
778 if (skb_dst(skb))
779 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
780
781 df |= (old_iph->frag_off & htons(IP_DF));
782
783 if ((old_iph->frag_off & htons(IP_DF) &&
784 mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
785 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
786 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
787 goto tx_error_put;
788 }
789
790 /*
791 * Okay, now see if we can stuff it in the buffer as-is.
792 */
793 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
794
795 if (skb_headroom(skb) < max_headroom
796 || skb_cloned(skb) || skb_shared(skb)) {
797 struct sk_buff *new_skb =
798 skb_realloc_headroom(skb, max_headroom);
799 if (!new_skb) {
800 ip_rt_put(rt);
801 kfree_skb(skb);
802 IP_VS_ERR_RL("%s(): no memory\n", __func__);
803 return NF_STOLEN;
804 }
805 kfree_skb(skb);
806 skb = new_skb;
807 old_iph = ip_hdr(skb);
808 }
809
810 skb->transport_header = skb->network_header;
811
812 /* fix old IP header checksum */
813 ip_send_check(old_iph);
814
815 skb_push(skb, sizeof(struct iphdr));
816 skb_reset_network_header(skb);
817 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
818
819 /* drop old route */
820 skb_dst_drop(skb);
821 skb_dst_set(skb, &rt->dst);
822
823 /*
824 * Push down and install the IPIP header.
825 */
826 iph = ip_hdr(skb);
827 iph->version = 4;
828 iph->ihl = sizeof(struct iphdr)>>2;
829 iph->frag_off = df;
830 iph->protocol = IPPROTO_IPIP;
831 iph->tos = tos;
832 iph->daddr = rt->rt_dst;
833 iph->saddr = rt->rt_src;
834 iph->ttl = old_iph->ttl;
835 ip_select_ident(iph, &rt->dst, NULL);
836
837 /* Another hack: avoid icmp_send in ip_fragment */
838 skb->local_df = 1;
839
840 ret = IP_VS_XMIT_TUNNEL(skb, cp);
841 if (ret == NF_ACCEPT)
842 ip_local_out(skb);
843 else if (ret == NF_DROP)
844 kfree_skb(skb);
845
846 LeaveFunction(10);
847
848 return NF_STOLEN;
849
850 tx_error_icmp:
851 dst_link_failure(skb);
852 tx_error:
853 kfree_skb(skb);
854 LeaveFunction(10);
855 return NF_STOLEN;
856 tx_error_put:
857 ip_rt_put(rt);
858 goto tx_error;
859 }
860
861 #ifdef CONFIG_IP_VS_IPV6
862 int
863 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
864 struct ip_vs_protocol *pp)
865 {
866 struct rt6_info *rt; /* Route to the other host */
867 struct in6_addr saddr; /* Source for tunnel */
868 struct net_device *tdev; /* Device to other host */
869 struct ipv6hdr *old_iph = ipv6_hdr(skb);
870 struct ipv6hdr *iph; /* Our new IP header */
871 unsigned int max_headroom; /* The extra header space needed */
872 int mtu;
873 int ret;
874
875 EnterFunction(10);
876
877 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
878 &saddr, 1, 1|2)))
879 goto tx_error_icmp;
880 if (__ip_vs_is_local_route6(rt)) {
881 dst_release(&rt->dst);
882 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
883 }
884
885 tdev = rt->dst.dev;
886
887 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
888 if (mtu < IPV6_MIN_MTU) {
889 IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
890 IPV6_MIN_MTU);
891 goto tx_error_put;
892 }
893 if (skb_dst(skb))
894 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
895
896 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
897 !skb_is_gso(skb)) {
898 if (!skb->dev) {
899 struct net *net = dev_net(skb_dst(skb)->dev);
900
901 skb->dev = net->loopback_dev;
902 }
903 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
904 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
905 goto tx_error_put;
906 }
907
908 /*
909 * Okay, now see if we can stuff it in the buffer as-is.
910 */
911 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
912
913 if (skb_headroom(skb) < max_headroom
914 || skb_cloned(skb) || skb_shared(skb)) {
915 struct sk_buff *new_skb =
916 skb_realloc_headroom(skb, max_headroom);
917 if (!new_skb) {
918 dst_release(&rt->dst);
919 kfree_skb(skb);
920 IP_VS_ERR_RL("%s(): no memory\n", __func__);
921 return NF_STOLEN;
922 }
923 kfree_skb(skb);
924 skb = new_skb;
925 old_iph = ipv6_hdr(skb);
926 }
927
928 skb->transport_header = skb->network_header;
929
930 skb_push(skb, sizeof(struct ipv6hdr));
931 skb_reset_network_header(skb);
932 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
933
934 /* drop old route */
935 skb_dst_drop(skb);
936 skb_dst_set(skb, &rt->dst);
937
938 /*
939 * Push down and install the IPIP header.
940 */
941 iph = ipv6_hdr(skb);
942 iph->version = 6;
943 iph->nexthdr = IPPROTO_IPV6;
944 iph->payload_len = old_iph->payload_len;
945 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
946 iph->priority = old_iph->priority;
947 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
948 ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
949 ipv6_addr_copy(&iph->saddr, &saddr);
950 iph->hop_limit = old_iph->hop_limit;
951
952 /* Another hack: avoid icmp_send in ip_fragment */
953 skb->local_df = 1;
954
955 ret = IP_VS_XMIT_TUNNEL(skb, cp);
956 if (ret == NF_ACCEPT)
957 ip6_local_out(skb);
958 else if (ret == NF_DROP)
959 kfree_skb(skb);
960
961 LeaveFunction(10);
962
963 return NF_STOLEN;
964
965 tx_error_icmp:
966 dst_link_failure(skb);
967 tx_error:
968 kfree_skb(skb);
969 LeaveFunction(10);
970 return NF_STOLEN;
971 tx_error_put:
972 dst_release(&rt->dst);
973 goto tx_error;
974 }
975 #endif
976
977
978 /*
979 * Direct Routing transmitter
980 * Used for ANY protocol
981 */
982 int
983 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
984 struct ip_vs_protocol *pp)
985 {
986 struct rtable *rt; /* Route to the other host */
987 struct iphdr *iph = ip_hdr(skb);
988 int mtu;
989
990 EnterFunction(10);
991
992 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
993 RT_TOS(iph->tos),
994 IP_VS_RT_MODE_LOCAL |
995 IP_VS_RT_MODE_NON_LOCAL)))
996 goto tx_error_icmp;
997 if (rt->rt_flags & RTCF_LOCAL) {
998 ip_rt_put(rt);
999 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
1000 }
1001
1002 /* MTU checking */
1003 mtu = dst_mtu(&rt->dst);
1004 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
1005 !skb_is_gso(skb)) {
1006 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1007 ip_rt_put(rt);
1008 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1009 goto tx_error;
1010 }
1011
1012 /*
1013 * Call ip_send_check because we are not sure it is called
1014 * after ip_defrag. Is copy-on-write needed?
1015 */
1016 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
1017 ip_rt_put(rt);
1018 return NF_STOLEN;
1019 }
1020 ip_send_check(ip_hdr(skb));
1021
1022 /* drop old route */
1023 skb_dst_drop(skb);
1024 skb_dst_set(skb, &rt->dst);
1025
1026 /* Another hack: avoid icmp_send in ip_fragment */
1027 skb->local_df = 1;
1028
1029 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
1030
1031 LeaveFunction(10);
1032 return NF_STOLEN;
1033
1034 tx_error_icmp:
1035 dst_link_failure(skb);
1036 tx_error:
1037 kfree_skb(skb);
1038 LeaveFunction(10);
1039 return NF_STOLEN;
1040 }
1041
1042 #ifdef CONFIG_IP_VS_IPV6
1043 int
1044 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1045 struct ip_vs_protocol *pp)
1046 {
1047 struct rt6_info *rt; /* Route to the other host */
1048 int mtu;
1049
1050 EnterFunction(10);
1051
1052 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1053 0, 1|2)))
1054 goto tx_error_icmp;
1055 if (__ip_vs_is_local_route6(rt)) {
1056 dst_release(&rt->dst);
1057 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
1058 }
1059
1060 /* MTU checking */
1061 mtu = dst_mtu(&rt->dst);
1062 if (skb->len > mtu) {
1063 if (!skb->dev) {
1064 struct net *net = dev_net(skb_dst(skb)->dev);
1065
1066 skb->dev = net->loopback_dev;
1067 }
1068 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1069 dst_release(&rt->dst);
1070 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1071 goto tx_error;
1072 }
1073
1074 /*
1075 * Unshare the skb before replacing its route (there is no IPv4 header
1076 * checksum to fix up here). Is copy-on-write needed?
1077 */
1078 skb = skb_share_check(skb, GFP_ATOMIC);
1079 if (unlikely(skb == NULL)) {
1080 dst_release(&rt->dst);
1081 return NF_STOLEN;
1082 }
1083
1084 /* drop old route */
1085 skb_dst_drop(skb);
1086 skb_dst_set(skb, &rt->dst);
1087
1088 /* Another hack: avoid icmp_send in ip_fragment */
1089 skb->local_df = 1;
1090
1091 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
1092
1093 LeaveFunction(10);
1094 return NF_STOLEN;
1095
1096 tx_error_icmp:
1097 dst_link_failure(skb);
1098 tx_error:
1099 kfree_skb(skb);
1100 LeaveFunction(10);
1101 return NF_STOLEN;
1102 }
1103 #endif
1104
1105
1106 /*
1107 * ICMP packet transmitter
1108 * called by the ip_vs_in_icmp
1109 */
1110 int
1111 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1112 struct ip_vs_protocol *pp, int offset)
1113 {
1114 struct rtable *rt; /* Route to the other host */
1115 int mtu;
1116 int rc;
1117 int local;
1118
1119 EnterFunction(10);
1120
1121 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1122 forwarded directly here, because there is no need to
1123 translate address/port back */
1124 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1125 if (cp->packet_xmit)
1126 rc = cp->packet_xmit(skb, cp, pp);
1127 else
1128 rc = NF_ACCEPT;
1129 /* do not touch skb anymore */
1130 atomic_inc(&cp->in_pkts);
1131 goto out;
1132 }
1133
1134 /*
1135 * mangle and send the packet here (only for VS/NAT)
1136 */
1137
1138 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1139 RT_TOS(ip_hdr(skb)->tos),
1140 IP_VS_RT_MODE_LOCAL |
1141 IP_VS_RT_MODE_NON_LOCAL |
1142 IP_VS_RT_MODE_RDR)))
1143 goto tx_error_icmp;
1144 local = rt->rt_flags & RTCF_LOCAL;
1145
1146 /*
1147 * Avoid duplicate tuple in reply direction for NAT traffic
1148 * to local address when connection is sync-ed
1149 */
1150 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1151 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1152 enum ip_conntrack_info ctinfo;
1153 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1154
1155 if (ct && !nf_ct_is_untracked(ct)) {
1156 IP_VS_DBG(10, "%s(): "
1157 "stopping DNAT to local address %pI4\n",
1158 __func__, &cp->daddr.ip);
1159 goto tx_error_put;
1160 }
1161 }
1162 #endif
1163
1164 /* From world but DNAT to loopback address? */
1165 if (local && ipv4_is_loopback(rt->rt_dst) &&
1166 rt_is_input_route(skb_rtable(skb))) {
1167 IP_VS_DBG(1, "%s(): "
1168 "stopping DNAT to loopback %pI4\n",
1169 __func__, &cp->daddr.ip);
1170 goto tx_error_put;
1171 }
1172
1173 /* MTU checking */
1174 mtu = dst_mtu(&rt->dst);
1175 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
1176 !skb_is_gso(skb)) {
1177 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1178 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1179 goto tx_error_put;
1180 }
1181
1182 /* copy-on-write the packet before mangling it */
1183 if (!skb_make_writable(skb, offset))
1184 goto tx_error_put;
1185
1186 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1187 goto tx_error_put;
1188
1189 ip_vs_nat_icmp(skb, pp, cp, 0);
1190
1191 if (!local) {
1192 /* drop the old route when skb is not shared */
1193 skb_dst_drop(skb);
1194 skb_dst_set(skb, &rt->dst);
1195 } else {
1196 ip_rt_put(rt);
1197 /*
1198 * Some IPv4 replies get local address from routes,
1199 * not from iph, so while we DNAT after routing
1200 * we need this second input/output route.
1201 */
1202 if (!__ip_vs_reroute_locally(skb))
1203 goto tx_error;
1204 }
1205
1206 /* Another hack: avoid icmp_send in ip_fragment */
1207 skb->local_df = 1;
1208
1209 IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
1210
1211 rc = NF_STOLEN;
1212 goto out;
1213
1214 tx_error_icmp:
1215 dst_link_failure(skb);
1216 tx_error:
1217 dev_kfree_skb(skb);
1218 rc = NF_STOLEN;
1219 out:
1220 LeaveFunction(10);
1221 return rc;
1222 tx_error_put:
1223 ip_rt_put(rt);
1224 goto tx_error;
1225 }
1226
1227 #ifdef CONFIG_IP_VS_IPV6
1228 int
1229 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1230 struct ip_vs_protocol *pp, int offset)
1231 {
1232 struct rt6_info *rt; /* Route to the other host */
1233 int mtu;
1234 int rc;
1235 int local;
1236
1237 EnterFunction(10);
1238
1239 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1240 forwarded directly here, because there is no need to
1241 translate address/port back */
1242 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1243 if (cp->packet_xmit)
1244 rc = cp->packet_xmit(skb, cp, pp);
1245 else
1246 rc = NF_ACCEPT;
1247 /* do not touch skb anymore */
1248 atomic_inc(&cp->in_pkts);
1249 goto out;
1250 }
1251
1252 /*
1253 * mangle and send the packet here (only for VS/NAT)
1254 */
1255
1256 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1257 0, 1|2|4)))
1258 goto tx_error_icmp;
1259
1260 local = __ip_vs_is_local_route6(rt);
1261 /*
1262 * Avoid duplicate tuple in reply direction for NAT traffic
1263 * to local address when connection is sync-ed
1264 */
1265 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1266 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1267 enum ip_conntrack_info ctinfo;
1268 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1269
1270 if (ct && !nf_ct_is_untracked(ct)) {
1271 IP_VS_DBG(10, "%s(): "
1272 "stopping DNAT to local address %pI6\n",
1273 __func__, &cp->daddr.in6);
1274 goto tx_error_put;
1275 }
1276 }
1277 #endif
1278
1279 /* From world but DNAT to loopback address? */
1280 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
1281 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
1282 IP_VS_DBG(1, "%s(): "
1283 "stopping DNAT to loopback %pI6\n",
1284 __func__, &cp->daddr.in6);
1285 goto tx_error_put;
1286 }
1287
1288 /* MTU checking */
1289 mtu = dst_mtu(&rt->dst);
1290 if (skb->len > mtu && !skb_is_gso(skb)) {
1291 if (!skb->dev) {
1292 struct net *net = dev_net(skb_dst(skb)->dev);
1293
1294 skb->dev = net->loopback_dev;
1295 }
1296 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1297 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1298 goto tx_error_put;
1299 }
1300
1301 /* copy-on-write the packet before mangling it */
1302 if (!skb_make_writable(skb, offset))
1303 goto tx_error_put;
1304
1305 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1306 goto tx_error_put;
1307
1308 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1309
1310 if (!local || !skb->dev) {
1311 /* drop the old route when skb is not shared */
1312 skb_dst_drop(skb);
1313 skb_dst_set(skb, &rt->dst);
1314 } else {
1315 /* destined to loopback, do we need to change route? */
1316 dst_release(&rt->dst);
1317 }
1318
1319 /* Another hack: avoid icmp_send in ip_fragment */
1320 skb->local_df = 1;
1321
1322 IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
1323
1324 rc = NF_STOLEN;
1325 goto out;
1326
1327 tx_error_icmp:
1328 dst_link_failure(skb);
1329 tx_error:
1330 dev_kfree_skb(skb);
1331 rc = NF_STOLEN;
1332 out:
1333 LeaveFunction(10);
1334 return rc;
1335 tx_error_put:
1336 dst_release(&rt->dst);
1337 goto tx_error;
1338 }
1339 #endif