/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller	:	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul	:	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>

42/* People can turn this off for buggy TCP's found in printers etc. */
ab32ea5d 43int sysctl_tcp_retrans_collapse __read_mostly = 1;
1da177e4 44
09cb105e 45/* People can turn this on to work with those rare, broken TCPs that
15d99e02
RJ
46 * interpret the window field as a signed quantity.
47 */
ab32ea5d 48int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
15d99e02 49
1da177e4
LT
50/* This limits the percentage of the congestion window which we
51 * will allow a single TSO frame to consume. Building TSO frames
52 * which are too large can cause TCP streams to be bursty.
53 */
ab32ea5d 54int sysctl_tcp_tso_win_divisor __read_mostly = 3;
1da177e4 55
ab32ea5d
BH
56int sysctl_tcp_mtu_probing __read_mostly = 0;
57int sysctl_tcp_base_mss __read_mostly = 512;
5d424d5a 58
35089bb2 59/* By default, RFC2861 behavior. */
ab32ea5d 60int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
35089bb2 61
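/* The sysctls above are exported under /proc/sys/net/ipv4/ (e.g.
 * tcp_retrans_collapse, tcp_mtu_probing, tcp_base_mss,
 * tcp_slow_start_after_idle).  A sketch of typical tuning from userspace,
 * assuming sysctl(8) is available; the values are illustrative only:
 *
 *	sysctl -w net.ipv4.tcp_mtu_probing=2	# probe unconditionally
 *	sysctl -w net.ipv4.tcp_base_mss=1024	# starting point for probes
 */
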
/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	/* Don't override Nagle indefinitely with F-RTO */
	if (tp->frto_counter == 2)
		tp->frto_counter = 3;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

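/* Worked example for the halving loop above (illustrative numbers only):
 * with icsk_rto worth 200 ms of jiffies and delta == 700 ms, the loop runs
 * while delta stays positive (500, 300, 100), so cwnd is halved three
 * times; a cwnd of 40 becomes 5, and the max() afterwards clamps the
 * result back up to restart_cwnd if we undershot the restart window.
 */
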
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value enough for senders following
	 * RFC2414. Senders not following this RFC will be satisfied
	 * with 2.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460 * 3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd * mss)
			*rcv_wnd = init_cwnd * mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

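/* Worked example for the wscale loop above (illustrative): with a maximum
 * receive buffer of 4 MB, space starts at 4194304; halving until the value
 * fits the 16-bit window field gives 4194304 >> 7 == 32768 <= 65535, so
 * rcv_wscale ends up as 7 and the peer must left-shift our advertised
 * window by 7.
 */
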
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

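/* Note on the ALIGN() above: cur_win is rounded up to the scale granularity
 * so that the later ">> rcv_wscale" shift cannot truncate it into an
 * effective shrink.  Illustrative numbers: cur_win == 10001 with
 * rcv_wscale == 3 becomes ALIGN(10001, 8) == 10008, and 10008 >> 3 == 1251
 * re-scales to exactly 10008 >= 10001 on the receiver side.
 */
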
/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->csum = 0;

	TCP_SKB_CB(skb)->flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline int tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)

struct tcp_out_options {
	u8 options;		/* bit field of OPTION_* */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u16 mss;		/* 0 to disable */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
};

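/* Size bookkeeping sketch (values from net/tcp.h): the aligned on-wire
 * costs are TCPOLEN_MSS_ALIGNED == 4, TCPOLEN_TSTAMP_ALIGNED == 12,
 * TCPOLEN_WSCALE_ALIGNED == 4 and TCPOLEN_SACKPERM_ALIGNED == 4, so a
 * typical SYN carrying MSS + timestamps + window scale (with SACK_PERM
 * folded into the timestamp word) uses 4 + 12 + 4 == 20 option bytes,
 * i.e. a 40-byte TCP header.
 */
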
/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      const struct tcp_out_options *opts,
			      __u8 **md5_hash) {
	if (unlikely(OPTION_MD5 & opts->options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		*md5_hash = (__u8 *)ptr;
		ptr += 4;
	} else {
		*md5_hash = NULL;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & opts->options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & opts->options &&
		     !(OPTION_TS & opts->options))) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(opts->ws)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}
}

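/* On-wire sketch of the plain timestamp word built above, assuming the
 * usual constants TCPOPT_NOP == 1, TCPOPT_TIMESTAMP == 8 and
 * TCPOLEN_TIMESTAMP == 10: the leading 32-bit word is
 * htonl((1 << 24) | (1 << 16) | (8 << 8) | 10) == htonl(0x0101080a),
 * i.e. the classic NOP, NOP, kind 8, length 10 pattern seen in traces.
 */
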
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5) {
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned size = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option. The option bytes which will be seen in
	 * normal data packets should timestamps be used, must be in the MSS
	 * advertised. But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc. So account for this
	 * fact here if necessary. If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	size += TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		if (likely(opts->ws))
			size += TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			size += TCPOLEN_SACKPERM_ALIGNED;
	}

	return size;
}

/* Set up TCP options for SYN-ACKs. */
static unsigned tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5) {
	unsigned size = 0;
	struct inet_request_sock *ireq = inet_rsk(req);
	char doing_ts;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We can't fit any SACK blocks in a packet with MD5 + TS options.
	 * There was discussion about disabling SACK rather than TS in
	 * order to fit in better with old, buggy kernels, but that was
	 * deemed to be unnecessary.
	 */
	doing_ts = ireq->tstamp_ok && !(*md5 && ireq->sack_ok);

	opts->mss = mss;
	size += TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		if (likely(opts->ws))
			size += TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(doing_ts)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!doing_ts))
			size += TCPOLEN_SACKPERM_ALIGNED;
	}

	return size;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5) {
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

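/* Worked example for the SACK budget above (constants from net/tcp.h:
 * MAX_TCP_OPTION_SPACE == 40, TCPOLEN_SACK_BASE_ALIGNED == 4,
 * TCPOLEN_SACK_PERBLOCK == 8): with timestamps in use size == 12, so
 * remaining == 28 and at most (28 - 4) / 8 == 3 SACK blocks fit,
 * consuming 12 + 4 + 3 * 8 == 40 option bytes in the worst case.
 */
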
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	__u8 *md5_hash_location;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source = inet->sport;
	th->dest = inet->dport;
	th->seq = htonl(tcb->seq);
	th->ack_seq = htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
				      tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window = htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window = htons(tcp_select_window(sk));
	}
	th->check = 0;
	th->urg_ptr = 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = 0xFFFF;
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
	if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		tp->af_specific->calc_md5_hash(md5_hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);

	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk) ||
	    skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters.
 */
static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	BUG_ON(len > skb->len);

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses when of
	 * skbs, which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

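/* Worked example (illustrative): fragmenting a 3000-byte skb at len == 2000
 * with mss_now == 1000 leaves skb covering seq..seq+2000 (pcount 2) and a
 * new buff covering seq+2000..seq+3000 (pcount 1); old_factor == 3, so
 * diff == 3 - 2 - 1 == 0 and no counter adjustment is needed.  If the cut
 * is not MSS-aligned (say len == 1500), both halves round up to pcount 2,
 * diff == -1, and tcp_adjust_pcount() nets a one-packet increase.
 */
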
/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize -= len;
	sk->sk_wmem_queued -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));

	return 0;
}

/* Calculate MSS. Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	 * It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}

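/* Worked example (illustrative, IPv4 with timestamps): pmtu == 1500 gives
 * 1500 - 20 (net_header_len) - 20 (tcphdr) == 1460; with timestamps on,
 * tp->tcp_header_len == 32, so the final step subtracts another 12 and
 * tcp_mtu_to_mss() returns 1448.  tcp_mss_to_mtu() below reverses this:
 * 1448 + 32 + 0 + 20 == 1500.
 */
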
/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}

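/* Note on sysctl_tcp_mtu_probing above: only a value greater than 1 enables
 * probing from connection setup (hence the "> 1" test); with the value 1,
 * probing is expected to be switched on later, when an ICMP black hole is
 * suspected.  The probe range then spans from tcp_base_mss mapped back to
 * an MTU up to the MTU implied by the peer's mss_clamp.
 */
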
/* This function synchronizes snd mss to current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
 * for TCP options, but includes only bare TCP header.
 *
 * tp->rx_opt.mss_clamp is mss negotiated at connection setup.
 * It is the minimum of user_mss and mss received with SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
 *
 * tp->mss_cache is current effective sending mss, including
 * all tcp options except for SACKs. It is evaluated,
 * taking into account current pmtu, but never exceeds
 * tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	unsigned header_len;
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;

	mss_now = tp->mss_cache;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have SACK
	 * blocks etc) then our calculated header_len will be different, and
	 * we have to adjust mss_now correspondingly.
	 */
	if (header_len != tp->tcp_header_len) {
		int delta = (int) header_len - tp->tcp_header_len;
		mss_now -= delta;
	}

	return mss_now;
}

/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
					unsigned int mss_now, unsigned int cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 needed, window, cwnd_len;

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	cwnd_len = mss_now * cwnd;

	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
		return cwnd_len;

	needed = min(skb->len, window);

	if (cwnd_len <= needed)
		return cwnd_len;

	return needed - needed % mss_now;
}

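/* Worked example (illustrative): with mss_now == 1000 and cwnd == 10,
 * cwnd_len == 10000; if the receiver window only covers 4500 bytes of a
 * 6000-byte tail skb, needed == 4500 and the function returns
 * 4500 - 4500 % 1000 == 4000, keeping the split on an MSS boundary.
 */
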
/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
					 struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN. */
	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
	    tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

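/* For illustration: with snd_cwnd == 10 and 7 packets still in flight the
 * quota is 3 segments; once in_flight reaches cwnd the quota drops to 0
 * and the caller must wait for ACKs (a lone FIN is exempt, as above).
 */
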
/* Initialize TSO state of a skb.
 * This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
			     unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

/* Minshall's variant of the Nagle send check. */
static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle && tp->packets_out && tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chances to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN).
	 * Nagle can be ignored during F-RTO too (see RFC4138).
	 */
	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
				   unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

/* Test if sending is allowed right now. */
int tcp_may_send_now(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk),
			     (tcp_skb_is_last(sk, skb) ?