/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

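/*
 * Finalize the IP header (tot_len and checksum) and run the packet
 * through the NF_INET_LOCAL_OUT netfilter hook.  A return value of 1
 * means the hook accepted the packet; ip_local_out() below then hands
 * it to dst_output() for transmission.
 */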
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	netif_rx_ni(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr    = saddr;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->dst, sk);

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

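/*
 * Last step of the output path: make sure there is enough headroom for
 * the link-layer header, then hand the packet to the neighbour layer,
 * either via the cached hardware header (dst->hh) or via the
 * neighbour's output function.
 */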
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

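/*
 * Return the MTU to honour for this skb: the device MTU when the socket
 * asked for IP_PMTUDISC_PROBE, otherwise the path MTU of the route.
 */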
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

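/*
 * Called once POST_ROUTING has accepted the packet.  If an IPsec policy
 * lookup after SNAT attached a transform, re-run dst_output() so the
 * packet is rerouted; otherwise fragment it when it exceeds the path
 * MTU (unless it is GSO) and pass it to ip_finish_output2().
 */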
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

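/*
 * Output routine for multicast and broadcast routes: loop a copy back
 * to local listeners when required, drop TTL-0 multicasts at the host,
 * then continue through POST_ROUTING as ip_output() does.
 */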
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

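/*
 * Queue a packet from a connection-oriented transport such as TCP.
 * Reuse the route cached on the socket when it is still valid,
 * otherwise perform a fresh route lookup, then build the IP header and
 * send the packet via ip_local_out().
 */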
int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the transport layer's retransmit mechanism
		 * will keep trying until the route appears or the connection
		 * times itself out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = fl4->saddr;
	iph->daddr    = fl4->daddr;
	/* The transport layer sets skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);


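/*
 * Copy the per-packet metadata (priority, device, dst, netfilter state
 * and so on) from the original skb to a freshly created fragment.
 */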
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

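/*
 * Two strategies are used below: when the skb already carries a valid
 * frag_list, each element is turned into a fragment in place (the fast
 * path); otherwise the slow path allocates a new skb per fragment and
 * copies the payload into it.
 */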
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one, which is not prohibited. In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copy when we see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/* for bridged IP traffic encapsulated inside e.g. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);

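/*
 * Copy user data described by an iovec into an skb, computing the
 * checksum on the fly unless the hardware will do it (CHECKSUM_PARTIAL).
 * Suitable as the getfrag callback of ip_append_data().
 */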
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

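/*
 * Core of ip_append_data(): append user data to the given pending queue
 * as a chain of skbs, each sized so that it becomes one IP fragment once
 * the header is added.  UFO-capable devices get a single large skb
 * instead and segment in hardware.
 */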
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;

	exthdrlen = transhdrlen ? rt->dst.header_len : 0;
	length += exthdrlen;
	transhdrlen += exthdrlen;
	mtu = cork->fragsize;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	skb = skb_peek_tail(queue);

	cork->length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 mtu, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each segment is an IP fragment ready for sending to the network
	 * once the appropriate IP header is added.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap) {
				alloclen += rt->dst.trailer_len;
				/* make sure mtu is not reached */
				if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
					datalen -= ALIGN(rt->dst.trailer_len, 8);
			}
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					cork->tx_flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			skb_shinfo(skb)->tx_flags = cork->tx_flags;

			/*
			 * Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = cork->page;
			int off = cork->off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				cork->page = page;
				cork->off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			cork->off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

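/*
 * Initialize the cork state for a new corked datagram: duplicate the IP
 * options, steal the route reference from the caller and record the
 * fragment size to use while appending data.
 */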
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (cork->opt == NULL) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(cork->opt == NULL))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal the reference to this route, the caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
			 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->tx_flags = ipc->tx_flags;
	cork->page = NULL;
	cork->off = 0;

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
				from, length, transhdrlen, flags);
}

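/*
 * Page-based variant of ip_append_data(): attach whole pages (as used by
 * sendfile() on UDP) to the pending datagram instead of copying the
 * payload.  Requires a scatter/gather capable device and an already
 * started cork.
 */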
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	cork->length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	if ((skb = __skb_dequeue(queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

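/*
 * Hand a finished datagram to the output path and translate congestion
 * notifications from the lower layers into errno values, counting any
 * drop in the OUTDISCARDS statistics.
 */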
int ip_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	int err;

	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

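/*
 * Build a complete, single datagram on a private queue and cork, without
 * touching the socket's write queue.  Returns the skb, an ERR_PTR on
 * failure, or NULL for MSG_PROBE.
 */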
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

1455
e905a9ed 1456/*
1da177e4
LT
1457 * Generic function to send a packet as reply to another packet.
1458 * Used to send TCP resets so far. ICMP should use this function too.
1459 *
e905a9ed 1460 * Should run single threaded per socket because it uses the sock
1da177e4 1461 * structure to pass arguments.
1da177e4 1462 */
0a5ebb80
DM
1463void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1464 struct ip_reply_arg *arg, unsigned int len)
1da177e4
LT
1465{
1466 struct inet_sock *inet = inet_sk(sk);
f6d8bd05 1467 struct ip_options_data replyopts;
1da177e4 1468 struct ipcm_cookie ipc;
77968b78 1469 struct flowi4 fl4;
511c3f92 1470 struct rtable *rt = skb_rtable(skb);
1da177e4 1471
f6d8bd05 1472 if (ip_options_echo(&replyopts.opt.opt, skb))
1da177e4
LT
1473 return;
1474
0a5ebb80 1475 ipc.addr = daddr;
1da177e4 1476 ipc.opt = NULL;
2244d07b 1477 ipc.tx_flags = 0;
1da177e4 1478
f6d8bd05 1479 if (replyopts.opt.opt.optlen) {
1da177e4
LT
1480 ipc.opt = &replyopts.opt;
1481
f6d8bd05
ED
1482 if (replyopts.opt.opt.srr)
1483 daddr = replyopts.opt.opt.faddr;
1da177e4
LT
1484 }
1485
77968b78
DM
1486 flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1487 RT_TOS(ip_hdr(skb)->tos),
1488 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1489 ip_reply_arg_flowi_flags(arg),
1490 daddr, rt->rt_spec_dst,
1491 tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1492 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1493 rt = ip_route_output_key(sock_net(sk), &fl4);
1494 if (IS_ERR(rt))
1495 return;
1da177e4
LT
1496
1497 /* And let IP do all the hard work.
1498
1499 This chunk is not reenterable, hence spinlock.
1500 Note that it uses the fact, that this function is called
1501 with locally disabled BH and that sk cannot be already spinlocked.
1502 */
1503 bh_lock_sock(sk);
eddc9ec5 1504 inet->tos = ip_hdr(skb)->tos;
1da177e4 1505 sk->sk_priority = skb->priority;
eddc9ec5 1506 sk->sk_protocol = ip_hdr(skb)->protocol;
f0e48dbf 1507 sk->sk_bound_dev_if = arg->bound_dev_if;
f5fca608 1508 ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
2e77d89b 1509 &ipc, &rt, MSG_DONTWAIT);
1da177e4
LT
1510 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1511 if (arg->csumoffset >= 0)
9c70220b
ACM
1512 *((__sum16 *)skb_transport_header(skb) +
1513 arg->csumoffset) = csum_fold(csum_add(skb->csum,
1514 arg->csum));
1da177e4 1515 skb->ip_summed = CHECKSUM_NONE;
77968b78 1516 ip_push_pending_frames(sk, &fl4);
1da177e4
LT
1517 }
1518
1519 bh_unlock_sock(sk);
1520
1521 ip_rt_put(rt);
1522}
1523
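/*
 * IP output subsystem initialization: set up the routing code and the
 * inet peer cache, plus the IGMP proc entries when multicast and procfs
 * are enabled.
 */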
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}