tcp: Maintain dynamic metrics in local cache.
include/net/tcp.h

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Offer an initial receive window of 10 mss. */
#define TCP_DEFAULT_INIT_RCVWND	10

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_COOKIE_BASE	2	/* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR	3	/* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
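
/* Illustrative note (not from the original header): the "aligned" lengths
 * above describe padded wire layouts, since options are laid out on 32-bit
 * boundaries. The 10-byte timestamp option, for example, is normally
 * preceded by two NOPs so it occupies TCPOLEN_TSTAMP_ALIGNED == 12 bytes:
 *
 *	01  01  08  0a  <4-byte TSval>  <4-byte TSecr>
 *	NOP NOP kind=8 len=10
 */
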
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
#define TCP_INIT_CWND		10

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
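
/* Worked example (illustrative only): the signed subtraction in before()
 * handles 32-bit sequence wraparound. With seq1 = 0xfffffff0 just before
 * the wrap and seq2 = 0x00000010 just after it:
 *
 *	(__s32)(0xfffffff0 - 0x00000010) == (__s32)0xffffffe0 < 0
 *
 * so before(0xfffffff0, 0x00000010) is true, as expected, even though
 * seq1 > seq2 when compared as plain unsigned values.
 */
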
static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

extern bool tcp_check_oom(struct sock *sk, int shift);

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_init_mem(struct net *net);

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern void tcp_v4_early_demux(struct sk_buff *skb);
extern int tcp_v4_rcv(struct sk_buff *skb);

extern struct inet_peer *tcp_v4_get_peer(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 const struct tcphdr *th, unsigned int len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern bool tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_init_metrics(struct sock *sk);
extern void tcp_metrics_init(void);
extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
extern void tcp_disable_fack(struct tcp_sock *tp);
extern void tcp_close(struct sock *sk, long timeout);
extern void tcp_init_sock(struct sock *sk);
extern unsigned int tcp_poll(struct file * file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(const struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, const u8 **hvpp,
			      int estab);
extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
					struct request_sock *req,
					struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);

void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);

/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif
/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern bool tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern bool tcp_syn_flood_action(struct sock *sk,
				 const struct sk_buff *skb,
				 const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);
extern void tcp_resume_early_retransmit(struct sock *sk);
extern void tcp_rearm_rto(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
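
/* Worked example (illustrative only): with tp->max_window = 65535 the
 * cutoff is 32767, so a 48k pktsize is bounded to 32767. With a tiny
 * tp->max_window = 300 the cutoff is 300 itself, and a larger pktsize
 * falls back to max_t(int, 300, 68 - tp->tcp_header_len) = 300.
 */
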
/* tcp.c */
extern void tcp_get_info(const struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

extern void tcp_set_rto(struct sock *sk);

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
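
/* Worked example (illustrative only): rcv_wup = 1000 and rcv_wnd = 500
 * advertise sequence space up to 1500. If the peer has already pushed
 * data to rcv_nxt = 1600, win = 1000 + 500 - 1600 = -100, which is
 * clamped to 0.
 */
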
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	/* 1 byte hole */
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
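
/* Illustrative use (a sketch, not part of the original header): read the
 * sequence range an skb covers from its control block,
 *
 *	u32 start = TCP_SKB_CB(skb)->seq;
 *	u32 end   = TCP_SKB_CB(skb)->end_seq;	(seq + FIN + SYN + datalen)
 */
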
/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
 *
 * If we receive a SYN packet with these bits set, it means a network is
 * playing bad games with TOS bits. In order to avoid possible false congestion
 * notifications, we disable TCP ECN negotiation.
 */
static inline void
TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);

	if (sysctl_tcp_ecn && th->ece && th->cwr &&
	    INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
		inet_rsk(req)->ecn_ok = 1;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
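
/* Worked example (illustrative only): a TSO skb carrying 3000 bytes of
 * payload with skb_shinfo(skb)->gso_size = 1000 counts as
 * tcp_skb_pcount(skb) == 3 actual packets of tcp_skb_mss(skb) == 1000
 * bytes each for congestion accounting.
 */
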
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
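
/* A minimal sketch (illustrative only, all "example_*" names hypothetical)
 * of a congestion control module built on the interface above. It mimics
 * Reno: halve cwnd on loss, slow start below ssthresh, additive increase
 * above it. A real module would also consult tcp_is_cwnd_limited() (declared
 * further below), include <linux/module.h> for THIS_MODULE, and register
 * from its module_init() with tcp_register_congestion_control().
 */
#if 0	/* sketch only, not built */
static u32 example_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);	/* halve cwnd on loss */
}

static void example_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);			/* exponential growth */
	else
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);	/* additive increase */
}

static struct tcp_congestion_ops example_ca = {
	.ssthresh	= example_ssthresh,
	.cong_avoid	= example_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
	.owner		= THIS_MODULE,
	.name		= "example",
};
#endif
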
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

/* TCP early-retransmit (ER) is similar to but more conservative than
 * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = sysctl_tcp_early_retrans &&
		!sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
	tp->early_retrans_delayed = 0;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
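
/* Worked example (illustrative only): with packets_out = 10,
 * sacked_out = 2, lost_out = 1 and retrans_out = 1,
 * tcp_left_out() = 3 and tcp_packets_in_flight() = 10 - 3 + 1 = 8.
 */
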
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8) --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}
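
/* Worked example (illustrative only): with sysctl_tcp_adv_win_scale = 2
 * (the historical default), tcp_win_from_space(65536) returns
 * 65536 - 65536/4 = 49152, i.e. 3/4 of the buffer is offered as window
 * and 1/4 is reserved for application and metadata overhead.
 */
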
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
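
/* Worked example (illustrative only): with ts_recent = 1000 and an
 * arriving rcv_tsval = 999, (s32)(1000 - 999) = 1, so the first check
 * fails for paws_win = 0 but passes for paws_win = TCP_PAWS_WINDOW (1),
 * tolerating a one-tick timestamp reordering.
 */
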
static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return false;
	return true;
}

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

union tcp_md5_addr {
	struct in_addr  a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr	a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family;	/* AF_INET or AF_INET6 */
	union tcp_md5_addr	addr;
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb);
extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
			  int family, const u8 *newkey,
			  u8 newkeylen, gfp_t gfp);
extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
			  int family);
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
			const union tcp_md5_addr *addr, int family);
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool	*tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
				 unsigned int header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    const struct tcp_md5sig_key *key);
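
/* Simplified signing sequence (an illustrative sketch only; the real
 * tcp_v4_md5_hash_skb() in tcp_ipv4.c additionally (re)initializes the
 * digest and hashes the pseudo-header) using the pool helpers above:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *	if (hp) {
 *		tcp_md5_hash_header(hp, th);
 *		tcp_md5_hash_skb_data(hp, skb, th->doff << 2);
 *		tcp_md5_hash_key(hp, key);
 *		crypto_hash_final(&hp->md5_desc, md5_hash);
 *		tcp_put_md5sig_pool();
 *	}
 */
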
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
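
/* Illustrative walk (a sketch, not from the original header): count how
 * many actual packets sit on the write queue ahead of the send head,
 * i.e. data that has already been pushed to the network.
 *
 *	struct sk_buff *skb;
 *	unsigned int pkts = 0;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;
 *		pkts += tcp_skb_pcount(skb);
 *	}
 */
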
static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);