[SK_BUFF]: Introduce ipv6_hdr(), remove skb->nh.ipv6h
net/ipv6/ip6_output.c
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
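
/* The counter above hands out identifications 1, 2, ..., 0xffffffff and
 * then wraps back to 1, so the value 0 is never issued.  The slow path of
 * ip6_fragment() relies on this: it treats frag_id == 0 as "no id selected
 * yet for this datagram".
 */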

static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
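
/* The usual two-level dst fast path, assuming the standard dst semantics:
 * dst->hh caches a ready-to-copy hardware header, while
 * dst->neighbour->output() may still have to resolve the link-layer
 * address (and can queue the skb while it does).
 */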

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb,
					NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
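
/* Fragment only when the packet exceeds the route MTU and is not a GSO
 * buffer (segmentation will bring it under the MTU later), or when the
 * route demands a fragment header on every packet (dst_allfrag, which the
 * PMTU code sets when a peer reports an MTU below the 1280-byte IPv6
 * minimum).
 */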

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;
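
	/* The first 32-bit word of the IPv6 header packs version (bits
	 * 31-28), traffic class (bits 27-20) and flow label (bits 19-0).
	 * Worked example: tclass = 0x2e (EF) yields
	 * htonl(0x60000000 | (0x2e << 20)) = htonl(0x62e00000), i.e.
	 * version 6 with traffic class 0x2e; fl->fl6_flowlabel (already in
	 * network byte order, confined to the low 20 bits) is OR-ed on top.
	 */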

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	skb->nh.raw = skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
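
/* payload_len counts only what follows the fixed 40-byte IPv6 header: a
 * neighbour solicitation with len = 24 (ICMPv6 NS without options) goes
 * out with payload_len = htons(24) in a 64-byte IPv6 packet.
 */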

static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
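
/* Every raw socket registered for this Router Alert value receives its own
 * clone of the skb; the original goes to the last match, which saves one
 * clone.  A nonzero return tells ip6_forward() the packet was consumed.
 */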

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake; RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
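
	/* opt->ra, recorded while the hop-by-hop header was parsed, is the
	 * offset of the Router Alert option within the network header;
	 * bytes 2 and 3 of that TLV carry the 16-bit value used to select
	 * matching raw sockets in ip6_call_ra_chain().
	 */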
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr) & (IPV6_ADDR_MULTICAST |
						  IPV6_ADDR_LOOPBACK |
						  IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb->tail - skb_network_header(skb);
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
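
/* ip6_find_1stfragopt() returns the offset, relative to the network header,
 * at which a fragment header must be inserted: just past the headers that
 * have to be repeated in every fragment.  For a plain TCP or UDP packet
 * with no extension headers that is sizeof(struct ipv6hdr) = 40, with
 * *nexthdr left pointing at the base header's nexthdr field.
 */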

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);
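
	/* From here on, mtu is the per-fragment data budget.  Worked example
	 * with no extension headers: dst_mtu = 1500, hlen = 40 and the
	 * 8-byte fragment header leave mtu = 1452; the slow path below
	 * additionally rounds non-final fragments down to a multiple of
	 * eight (len &= ~7), i.e. 1448 data bytes per full fragment.
	 */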

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}
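
	/* Slow path: the skb cannot be fragmented in place (shared or cloned
	 * data, or frag_list members with the wrong geometry), so allocate a
	 * fresh skb for every fragment and copy the data into it.
	 */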

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev),
				      GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr*)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(skb_network_header(frag), skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}
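
/* A nonzero result means the cached route can no longer be trusted for
 * fl_addr: it is neither an exact host (/128) match for the flow's address
 * nor confirmed by the socket's cached last-used address.
 */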

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the not connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi fl_gw;
		int redirect;

		ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw, fl, sizeof(struct flowi));
			memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(sk, &fl_gw);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow normal path
	 */
	kfree_skb(skb);

	return err;
}
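
/* gso_size is the per-fragment data budget that the UFO-capable device (or
 * the software GSO path) must honour when it later splits this one large
 * skb: with mtu = 1500 and fragheaderlen = 40 that is 1500 - 40 - 8 = 1452
 * bytes per fragment, all of them sharing the pre-selected ip6_frag_id.
 */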

int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);
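
	/* Worked example with no extension headers: mtu = 1500 and
	 * fragheaderlen = 40 give maxfraglen = (1460 & ~7) + 40 - 8 = 1488.
	 * A queued 1488-byte fragment plus the 8-byte fragment header that
	 * ip6_fragment() inserts later totals 1496 <= 1500, and its 1448
	 * data bytes remain a multiple of eight, as reassembly requires.
	 */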

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 *        --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->h.raw = skb->nh.raw + fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page) + frag->page_offset + frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32*)hdr = fl->fl6_flowlabel |
			htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev,
		      dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}

void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}
1426}