/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                   __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
    return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                      ip_hdr(skb)->saddr,
                      tcp_hdr(skb)->dest,
                      tcp_hdr(skb)->source);
}
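
/* Background note (a sketch, not part of the original source):
 * secure_tcp_sequence_number() implements RFC 6528-style initial
 * sequence number selection. Conceptually:
 *
 *	ISN = M + F(saddr, daddr, sport, dport, secret)
 *
 * where M is a clock ticking roughly every 4 microseconds and F is a
 * keyed cryptographic hash, so ISNs are hard to predict off-path while
 * still increasing monotonically for a given connection tuple.
 */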
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
    const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
    struct tcp_sock *tp = tcp_sk(sk);

    /* With PAWS, it is safe from the viewpoint
       of data integrity. Even without PAWS it is safe provided sequence
       spaces do not overlap i.e. at data rates <= 80Mbit/sec.

       Actually, the idea is close to VJ's one, only timestamp cache is
       held not per host, but per port pair and TW bucket is used as state
       holder.

       If TW bucket has been already destroyed we fall back to VJ's scheme
       and use initial timestamp retrieved from peer table.
     */
    if (tcptw->tw_ts_recent_stamp &&
        (twp == NULL || (sysctl_tcp_tw_reuse &&
                 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
        tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
        if (tp->write_seq == 0)
            tp->write_seq = 1;
        tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
        tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
        sock_hold(sktw);
        return 1;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
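
/* Worked example (not from the original source): with tcp_tw_reuse set,
 * the test above allows reuse once the TIME-WAIT peer's last timestamp
 * is more than one second old, e.g.:
 *
 *	get_seconds() = 1000005, tcptw->tw_ts_recent_stamp = 1000003
 *	  -> 1000005 - 1000003 = 2 > 1, so the bucket may be reused, and
 *	     write_seq restarts at tw_snd_nxt + 65535 + 2, safely above
 *	     anything the old incarnation could have sent.
 */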
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
    struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
    struct inet_sock *inet = inet_sk(sk);
    struct tcp_sock *tp = tcp_sk(sk);
    __be16 orig_sport, orig_dport;
    __be32 daddr, nexthop;
    struct flowi4 *fl4;
    struct rtable *rt;
    int err;
    struct ip_options_rcu *inet_opt;

    if (addr_len < sizeof(struct sockaddr_in))
        return -EINVAL;

    if (usin->sin_family != AF_INET)
        return -EAFNOSUPPORT;

    nexthop = daddr = usin->sin_addr.s_addr;
    inet_opt = rcu_dereference_protected(inet->inet_opt,
                         sock_owned_by_user(sk));
    if (inet_opt && inet_opt->opt.srr) {
        if (!daddr)
            return -EINVAL;
        nexthop = inet_opt->opt.faddr;
    }

    orig_sport = inet->inet_sport;
    orig_dport = usin->sin_port;
    fl4 = &inet->cork.fl.u.ip4;
    rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                  RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                  IPPROTO_TCP,
                  orig_sport, orig_dport, sk, true);
    if (IS_ERR(rt)) {
        err = PTR_ERR(rt);
        if (err == -ENETUNREACH)
            IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        return err;
    }

    if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
        ip_rt_put(rt);
        return -ENETUNREACH;
    }

    if (!inet_opt || !inet_opt->opt.srr)
        daddr = fl4->daddr;

    if (!inet->inet_saddr)
        inet->inet_saddr = fl4->saddr;
    inet->inet_rcv_saddr = inet->inet_saddr;

    if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
        /* Reset inherited state */
        tp->rx_opt.ts_recent       = 0;
        tp->rx_opt.ts_recent_stamp = 0;
        if (likely(!tp->repair))
            tp->write_seq      = 0;
    }

    if (tcp_death_row.sysctl_tw_recycle &&
        !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
        tcp_fetch_timewait_stamp(sk, &rt->dst);

    inet->inet_dport = usin->sin_port;
    inet->inet_daddr = daddr;

    inet_csk(sk)->icsk_ext_hdr_len = 0;
    if (inet_opt)
        inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

    tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

    /* Socket identity is still unknown (sport may be zero).
     * However we set state to SYN-SENT and, without releasing the socket
     * lock, select a source port, enter ourselves into the hash tables and
     * complete initialization after this.
     */
    tcp_set_state(sk, TCP_SYN_SENT);
    err = inet_hash_connect(&tcp_death_row, sk);
    if (err)
        goto failure;

    rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                   inet->inet_sport, inet->inet_dport, sk);
    if (IS_ERR(rt)) {
        err = PTR_ERR(rt);
        rt = NULL;
        goto failure;
    }
    /* OK, now commit destination to socket.  */
    sk->sk_gso_type = SKB_GSO_TCPV4;
    sk_setup_caps(sk, &rt->dst);
    printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n",
           SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
    if (!tp->write_seq && likely(!tp->repair))
        tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                               inet->inet_daddr,
                               inet->inet_sport,
                               usin->sin_port);

    inet->inet_id = tp->write_seq ^ jiffies;

    err = tcp_connect(sk);

    rt = NULL;
    if (err)
        goto failure;

    return 0;

failure:
    /*
     * This unhashes the socket and releases the local port,
     * if necessary.
     */
    tcp_set_state(sk, TCP_CLOSE);
    ip_rt_put(rt);
    sk->sk_route_caps = 0;
    inet->inet_dport = 0;
    return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
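
/* Usage note (illustrative, not part of the original source):
 * tcp_v4_connect() is what ultimately runs when userspace calls
 * connect(2) on an IPv4 TCP socket. A minimal sketch of the caller side:
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in dst = { .sin_family = AF_INET,
 *					   .sin_port = htons(80) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *		// drives tcp_v4_connect(): route lookup, SYN-SENT, tcp_connect()
 *		connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *		close(fd);
 *		return 0;
 *	}
 */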
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
    struct dst_entry *dst;
    struct inet_sock *inet = inet_sk(sk);
    u32 mtu = tcp_sk(sk)->mtu_info;

    dst = inet_csk_update_pmtu(sk, mtu);
    if (!dst)
        return;

    /* Something is about to be wrong... Remember soft error
     * for the case, if this connection will not be able to recover.
     */
    if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
        sk->sk_err_soft = EMSGSIZE;

    mtu = dst_mtu(dst);

    if (inet->pmtudisc != IP_PMTUDISC_DONT &&
        inet_csk(sk)->icsk_pmtu_cookie > mtu) {
        tcp_sync_mss(sk, mtu);

        /* Resend the TCP packet because it's
         * clear that the old packet has been
         * dropped. This is the new "fast" path mtu
         * discovery.
         */
        tcp_simple_retransmit(sk);
    } /* else let the usual retransmit timer handle it */
}
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
    struct dst_entry *dst = __sk_dst_check(sk, 0);

    if (dst)
        dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
    const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
    struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
    struct inet_connection_sock *icsk;
    struct tcp_sock *tp;
    struct inet_sock *inet;
    const int type = icmp_hdr(icmp_skb)->type;
    const int code = icmp_hdr(icmp_skb)->code;
    struct sock *sk;
    struct sk_buff *skb;
    struct request_sock *req;
    __u32 seq;
    __u32 remaining;
    int err;
    struct net *net = dev_net(icmp_skb->dev);

    if (icmp_skb->len < (iph->ihl << 2) + 8) {
        ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
        return;
    }

    sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
            iph->saddr, th->source, inet_iif(icmp_skb));
    if (!sk) {
        ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
        return;
    }
    if (sk->sk_state == TCP_TIME_WAIT) {
        inet_twsk_put(inet_twsk(sk));
        return;
    }

    bh_lock_sock(sk);
    /* If too many ICMPs get dropped on busy
     * servers this needs to be solved differently.
     * We do take care of PMTU discovery (RFC1191) special case :
     * we can receive locally generated ICMP messages while socket is held.
     */
    if (sock_owned_by_user(sk)) {
        if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
            NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
    }
    if (sk->sk_state == TCP_CLOSE)
        goto out;

    if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
        NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
        goto out;
    }

    icsk = inet_csk(sk);
    tp = tcp_sk(sk);
    req = tp->fastopen_rsk;
    seq = ntohl(th->seq);
    if (sk->sk_state != TCP_LISTEN &&
        !between(seq, tp->snd_una, tp->snd_nxt) &&
        (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
        /* For a Fast Open socket, allow seq to be snt_isn. */
        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
        goto out;
    }

    switch (type) {
    case ICMP_REDIRECT:
        do_redirect(icmp_skb, sk);
        goto out;
    case ICMP_SOURCE_QUENCH:
        /* Just silently ignore these. */
        goto out;
    case ICMP_PARAMETERPROB:
        err = EPROTO;
        break;
    case ICMP_DEST_UNREACH:
        if (code > NR_ICMP_UNREACH)
            goto out;

        if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
            /* We are not interested in TCP_LISTEN and open_requests
             * (SYN-ACKs sent out by Linux are always <576bytes so
             * they should go through unfragmented).
             */
            if (sk->sk_state == TCP_LISTEN)
                goto out;

            tp->mtu_info = info;
            if (!sock_owned_by_user(sk)) {
                tcp_v4_mtu_reduced(sk);
            } else {
                if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
                    sock_hold(sk);
            }
            goto out;
        }

        err = icmp_err_convert[code].errno;
        /* check if icmp_skb allows revert of backoff
         * (see draft-zimmermann-tcp-lcd) */
        if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
            break;
        if (seq != tp->snd_una || !icsk->icsk_retransmits ||
            !icsk->icsk_backoff)
            break;

        /* XXX (TFO) - revisit the following logic for TFO */

        if (sock_owned_by_user(sk))
            break;

        icsk->icsk_backoff--;
        inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
            TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
        tcp_bound_rto(sk);

        skb = tcp_write_queue_head(sk);
        BUG_ON(!skb);

        remaining = icsk->icsk_rto - min(icsk->icsk_rto,
                tcp_time_stamp - TCP_SKB_CB(skb)->when);

        if (remaining) {
            inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                          remaining, sysctl_tcp_rto_max);
        } else {
            /* RTO revert clocked out retransmission.
             * Will retransmit now */
            tcp_retransmit_timer(sk);
        }

        break;
    case ICMP_TIME_EXCEEDED:
        err = EHOSTUNREACH;
        break;
    default:
        goto out;
    }

    /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
     * than following the TCP_SYN_RECV case and closing the socket,
     * we ignore the ICMP error and keep trying like a fully established
     * socket. Is this the right thing to do?
     */
    if (req && req->sk == NULL)
        goto out;

    switch (sk->sk_state) {
        struct request_sock *req, **prev;
    case TCP_LISTEN:
        if (sock_owned_by_user(sk))
            goto out;

        req = inet_csk_search_req(sk, &prev, th->dest,
                      iph->daddr, iph->saddr);
        if (!req)
            goto out;

        /* ICMPs are not backlogged, hence we cannot get
           an established socket here.
         */
        WARN_ON(req->sk);

        if (seq != tcp_rsk(req)->snt_isn) {
            NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
            goto out;
        }

        /*
         * Still in SYN_RECV, just remove it silently.
         * There is no good way to pass the error to the newly
         * created socket, and POSIX does not want network
         * errors returned from accept().
         */
        inet_csk_reqsk_queue_drop(sk, req, prev);
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        goto out;

    case TCP_SYN_SENT:
    case TCP_SYN_RECV:  /* Cannot happen.
                   It can f.e. if SYNs crossed,
                   or Fast Open.
                 */
        if (!sock_owned_by_user(sk)) {
            sk->sk_err = err;

            sk->sk_error_report(sk);

            tcp_done(sk);
        } else {
            sk->sk_err_soft = err;
        }
        goto out;
    }

    /* If we've already connected we will keep trying
     * until we time out, or the user gives up.
     *
     * rfc1122 4.2.3.9 allows to consider as hard errors
     * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
     * but it is obsoleted by pmtu discovery).
     *
     * Note, that in modern internet, where routing is unreliable
     * and in each dark corner broken firewalls sit, sending random
     * errors ordered by their masters even these two messages finally lose
     * their original sense (even Linux sends invalid PORT_UNREACHs)
     *
     * Now we are in compliance with RFCs.
     *							--ANK (980905)
     */

    inet = inet_sk(sk);
    if (!sock_owned_by_user(sk) && inet->recverr) {
        sk->sk_err = err;
        sk->sk_error_report(sk);
    } else { /* Only an error on timeout */
        sk->sk_err_soft = err;
    }

out:
    bh_unlock_sock(sk);
    sock_put(sk);
}
static void __tcp_v4_send_check(struct sk_buff *skb,
                __be32 saddr, __be32 daddr)
{
    struct tcphdr *th = tcp_hdr(skb);

    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
        skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
    } else {
        th->check = tcp_v4_check(skb->len, saddr, daddr,
                     csum_partial(th,
                              th->doff << 2,
                              skb->csum));
    }
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
    const struct inet_sock *inet = inet_sk(sk);

    __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
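
/* Background note (a sketch, not part of the original source): the
 * checksum written above is the standard ones'-complement sum over a
 * pseudo-header plus the TCP segment. A self-contained illustration of
 * the same fold:
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static uint16_t csum_fold(const uint16_t *words, size_t n, uint32_t sum)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < n; i++)
 *			sum += words[i];	// 16-bit ones'-complement add
 *		while (sum >> 16)		// fold carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 *
 * The kernel's csum_tcpudp_nofold()/tcp_v4_check() helpers perform the
 * same arithmetic, seeded with saddr, daddr, IPPROTO_TCP and the length.
 */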
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
    const struct iphdr *iph;
    struct tcphdr *th;

    if (!pskb_may_pull(skb, sizeof(*th)))
        return -EINVAL;

    iph = ip_hdr(skb);
    th = tcp_hdr(skb);

    th->check = 0;
    skb->ip_summed = CHECKSUM_PARTIAL;
    __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
    return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
    const struct tcphdr *th = tcp_hdr(skb);
    struct {
        struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
        __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
    } rep;
    struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
    struct tcp_md5sig_key *key;
    const __u8 *hash_location = NULL;
    unsigned char newhash[16];
    int genhash;
    struct sock *sk1 = NULL;
#endif
    struct net *net;

    /* Never send a reset in response to a reset. */
    if (th->rst)
        return;

    if (skb_rtable(skb)->rt_type != RTN_LOCAL)
        return;

    /* Swap the send and the receive. */
    memset(&rep, 0, sizeof(rep));
    rep.th.dest   = th->source;
    rep.th.source = th->dest;
    rep.th.doff   = sizeof(struct tcphdr) / 4;
    rep.th.rst    = 1;

    if (th->ack) {
        rep.th.seq = th->ack_seq;
    } else {
        rep.th.ack = 1;
        rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                       skb->len - (th->doff << 2));
    }

    memset(&arg, 0, sizeof(arg));
    arg.iov[0].iov_base = (unsigned char *)&rep;
    arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
    hash_location = tcp_parse_md5sig_option(th);
    if (!sk && hash_location) {
        /*
         * active side is lost. Try to find listening socket through
         * source port, and then find md5 key through listening socket.
         * we do not lose security here:
         * Incoming packet is checked with md5 hash with finding key,
         * no RST generated if md5 hash doesn't match.
         */
        sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
                         &tcp_hashinfo, ip_hdr(skb)->saddr,
                         th->source, ip_hdr(skb)->daddr,
                         ntohs(th->source), inet_iif(skb));
        /* don't send rst if it can't find key */
        if (!sk1)
            return;
        rcu_read_lock();
        key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
                    &ip_hdr(skb)->saddr, AF_INET);
        if (!key)
            goto release_sk1;

        genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
        if (genhash || memcmp(hash_location, newhash, 16) != 0)
            goto release_sk1;
    } else {
        key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
                         &ip_hdr(skb)->saddr,
                         AF_INET) : NULL;
    }

    if (key) {
        rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                   (TCPOPT_NOP << 16) |
                   (TCPOPT_MD5SIG << 8) |
                   TCPOLEN_MD5SIG);
        /* Update length and the length the header thinks exists */
        arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
        rep.th.doff = arg.iov[0].iov_len / 4;

        tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
                    key, ip_hdr(skb)->saddr,
                    ip_hdr(skb)->daddr, &rep.th);
    }
#endif
    arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                      ip_hdr(skb)->saddr, /* XXX */
                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
    arg.csumoffset = offsetof(struct tcphdr, check) / 2;
    arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
    /* When socket is gone, all binding information is lost.
     * routing might fail in this case. No choice here, if we choose to force
     * input interface, we will misroute in case of asymmetric route.
     */
    if (sk)
        arg.bound_dev_if = sk->sk_bound_dev_if;

    net = dev_net(skb_dst(skb)->dev);
    arg.tos = ip_hdr(skb)->tos;
    ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
                  ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

    TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
    TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
    if (sk1) {
        rcu_read_unlock();
        sock_put(sk1);
    }
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                u32 win, u32 tsval, u32 tsecr, int oif,
                struct tcp_md5sig_key *key,
                int reply_flags, u8 tos)
{
    const struct tcphdr *th = tcp_hdr(skb);
    struct {
        struct tcphdr th;
        __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
               + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
            ];
    } rep;
    struct ip_reply_arg arg;
    struct net *net = dev_net(skb_dst(skb)->dev);

    memset(&rep.th, 0, sizeof(struct tcphdr));
    memset(&arg, 0, sizeof(arg));

    arg.iov[0].iov_base = (unsigned char *)&rep;
    arg.iov[0].iov_len  = sizeof(rep.th);
    if (tsecr) {
        rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                   (TCPOPT_TIMESTAMP << 8) |
                   TCPOLEN_TIMESTAMP);
        rep.opt[1] = htonl(tsval);
        rep.opt[2] = htonl(tsecr);
        arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
    }

    /* Swap the send and the receive. */
    rep.th.dest    = th->source;
    rep.th.source  = th->dest;
    rep.th.doff    = arg.iov[0].iov_len / 4;
    rep.th.seq     = htonl(seq);
    rep.th.ack_seq = htonl(ack);
    rep.th.ack     = 1;
    rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
    if (key) {
        int offset = (tsecr) ? 3 : 0;

        rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                      (TCPOPT_NOP << 16) |
                      (TCPOPT_MD5SIG << 8) |
                      TCPOLEN_MD5SIG);
        arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
        rep.th.doff = arg.iov[0].iov_len/4;

        tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
                    key, ip_hdr(skb)->saddr,
                    ip_hdr(skb)->daddr, &rep.th);
    }
#endif
    arg.flags = reply_flags;
    arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                      ip_hdr(skb)->saddr, /* XXX */
                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
    arg.csumoffset = offsetof(struct tcphdr, check) / 2;
    if (oif)
        arg.bound_dev_if = oif;
    arg.tos = tos;
    ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
                  ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

    TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
    struct inet_timewait_sock *tw = inet_twsk(sk);
    struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

    tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
            tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
            tcp_time_stamp + tcptw->tw_ts_offset,
            tcptw->tw_ts_recent,
            tw->tw_bound_dev_if,
            tcp_twsk_md5_key(tcptw),
            tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
            tw->tw_tos);

    inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                  struct request_sock *req)
{
    /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
     * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
     */
    tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
            tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
            tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
            tcp_time_stamp,
            req->ts_recent,
            0,
            tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
                      AF_INET),
            inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
            ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                  struct request_sock *req,
                  u16 queue_mapping,
                  bool nocache)
{
    const struct inet_request_sock *ireq = inet_rsk(req);
    struct flowi4 fl4;
    int err = -1;
    struct sk_buff *skb;

    /* First, grab a route. */
    if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
        return -1;

    skb = tcp_make_synack(sk, dst, req, NULL);

    if (skb) {
        __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

        skb_set_queue_mapping(skb, queue_mapping);
        err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                        ireq->rmt_addr,
                        ireq->opt);
        err = net_xmit_eval(err);
        if (!tcp_rsk(req)->snt_synack && !err)
            tcp_rsk(req)->snt_synack = tcp_time_stamp;
    }

    return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
    int res = tcp_v4_send_synack(sk, NULL, req, 0, false);

    if (!res)
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
    return res;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
    kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
              const struct sk_buff *skb,
              const char *proto)
{
    const char *msg = "Dropping request";
    bool want_cookie = false;
    struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
    if (sysctl_tcp_syncookies) {
        msg = "Sending cookies";
        want_cookie = true;
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
    } else
#endif
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

    lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
    if (!lopt->synflood_warned) {
        lopt->synflood_warned = 1;
        pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
            proto, ntohs(tcp_hdr(skb)->dest), msg);
    }
    return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
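
/* Usage note (illustrative, not part of the original source): whether
 * tcp_syn_flood_action() sends cookies is governed by the
 * net.ipv4.tcp_syncookies sysctl. A minimal userspace sketch that turns
 * it on (assumes procfs is mounted and the caller has privilege):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1", f);	// reply with SYN cookies when the queue fills
 *		fclose(f);
 *		return 0;
 *	}
 */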
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
    const struct ip_options *opt = &(IPCB(skb)->opt);
    struct ip_options_rcu *dopt = NULL;

    if (opt && opt->optlen) {
        int opt_size = sizeof(*dopt) + opt->optlen;

        dopt = kmalloc(opt_size, GFP_ATOMIC);
        if (dopt) {
            if (ip_options_echo(&dopt->opt, skb)) {
                kfree(dopt);
                dopt = NULL;
            }
        }
    }
    return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
                     const union tcp_md5_addr *addr,
                     int family)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct tcp_md5sig_key *key;
    unsigned int size = sizeof(struct in_addr);
    struct tcp_md5sig_info *md5sig;

    /* caller either holds rcu_read_lock() or socket lock */
    md5sig = rcu_dereference_check(tp->md5sig_info,
                       sock_owned_by_user(sk) ||
                       lockdep_is_held(&sk->sk_lock.slock));
    if (!md5sig)
        return NULL;
#if IS_ENABLED(CONFIG_IPV6)
    if (family == AF_INET6)
        size = sizeof(struct in6_addr);
#endif
    hlist_for_each_entry_rcu(key, &md5sig->head, node) {
        if (key->family != family)
            continue;
        if (!memcmp(&key->addr, addr, size))
            return key;
    }
    return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                     struct sock *addr_sk)
{
    union tcp_md5_addr *addr;

    addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
    return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
                              struct request_sock *req)
{
    union tcp_md5_addr *addr;

    addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
    return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
           int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
    /* Add Key to the list */
    struct tcp_md5sig_key *key;
    struct tcp_sock *tp = tcp_sk(sk);
    struct tcp_md5sig_info *md5sig;

    key = tcp_md5_do_lookup(sk, addr, family);
    if (key) {
        /* Pre-existing entry - just update that one. */
        memcpy(key->key, newkey, newkeylen);
        key->keylen = newkeylen;
        return 0;
    }

    md5sig = rcu_dereference_protected(tp->md5sig_info,
                       sock_owned_by_user(sk));
    if (!md5sig) {
        md5sig = kmalloc(sizeof(*md5sig), gfp);
        if (!md5sig)
            return -ENOMEM;

        sk_nocaps_add(sk, NETIF_F_GSO_MASK);
        INIT_HLIST_HEAD(&md5sig->head);
        rcu_assign_pointer(tp->md5sig_info, md5sig);
    }

    key = sock_kmalloc(sk, sizeof(*key), gfp);
    if (!key)
        return -ENOMEM;
    if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
        sock_kfree_s(sk, key, sizeof(*key));
        return -ENOMEM;
    }

    memcpy(key->key, newkey, newkeylen);
    key->keylen = newkeylen;
    key->family = family;
    memcpy(&key->addr, addr,
           (family == AF_INET6) ? sizeof(struct in6_addr) :
                      sizeof(struct in_addr));
    hlist_add_head_rcu(&key->node, &md5sig->head);
    return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct tcp_md5sig_key *key;
    struct tcp_md5sig_info *md5sig;

    key = tcp_md5_do_lookup(sk, addr, family);
    if (!key)
        return -ENOENT;
    hlist_del_rcu(&key->node);
    atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
    kfree_rcu(key, rcu);
    md5sig = rcu_dereference_protected(tp->md5sig_info,
                       sock_owned_by_user(sk));
    if (hlist_empty(&md5sig->head))
        tcp_free_md5sig_pool();
    return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct tcp_md5sig_key *key;
    struct hlist_node *n;
    struct tcp_md5sig_info *md5sig;

    md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

    if (!hlist_empty(&md5sig->head))
        tcp_free_md5sig_pool();
    hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
        hlist_del_rcu(&key->node);
        atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
        kfree_rcu(key, rcu);
    }
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
                 int optlen)
{
    struct tcp_md5sig cmd;
    struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

    if (optlen < sizeof(cmd))
        return -EINVAL;

    if (copy_from_user(&cmd, optval, sizeof(cmd)))
        return -EFAULT;

    if (sin->sin_family != AF_INET)
        return -EINVAL;

    if (!cmd.tcpm_key || !cmd.tcpm_keylen)
        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                      AF_INET);

    if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
        return -EINVAL;

    return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                  AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
                  GFP_KERNEL);
}
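
/* Usage note (illustrative, not part of the original source): the parser
 * above backs the TCP_MD5SIG socket option (RFC 2385). A sketch of how
 * userspace installs a key for a peer; struct tcp_md5sig comes from the
 * uapi <linux/tcp.h> on kernels of this vintage:
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/tcp.h>
 *
 *	static int install_md5_key(int fd, const struct sockaddr_in *peer)
 *	{
 *		struct tcp_md5sig sig;
 *
 *		memset(&sig, 0, sizeof(sig));
 *		memcpy(&sig.tcpm_addr, peer, sizeof(*peer));
 *		sig.tcpm_keylen = 6;
 *		memcpy(sig.tcpm_key, "secret", 6);
 *		// lands in tcp_v4_parse_md5_keys() -> tcp_md5_do_add()
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &sig, sizeof(sig));
 *	}
 */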
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
                    __be32 daddr, __be32 saddr, int nbytes)
{
    struct tcp4_pseudohdr *bp;
    struct scatterlist sg;

    bp = &hp->md5_blk.ip4;

    /*
     * 1. the TCP pseudo-header (in the order: source IP address,
     * destination IP address, zero-padded protocol number, and
     * segment length)
     */
    bp->saddr = saddr;
    bp->daddr = daddr;
    bp->pad = 0;
    bp->protocol = IPPROTO_TCP;
    bp->len = cpu_to_be16(nbytes);

    sg_init_one(&sg, bp, sizeof(*bp));
    return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                   __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
    struct tcp_md5sig_pool *hp;
    struct hash_desc *desc;

    hp = tcp_get_md5sig_pool();
    if (!hp)
        goto clear_hash_noput;
    desc = &hp->md5_desc;

    if (crypto_hash_init(desc))
        goto clear_hash;
    if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
        goto clear_hash;
    if (tcp_md5_hash_header(hp, th))
        goto clear_hash;
    if (tcp_md5_hash_key(hp, key))
        goto clear_hash;
    if (crypto_hash_final(desc, md5_hash))
        goto clear_hash;

    tcp_put_md5sig_pool();
    return 0;

clear_hash:
    tcp_put_md5sig_pool();
clear_hash_noput:
    memset(md5_hash, 0, 16);
    return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
            const struct sock *sk, const struct request_sock *req,
            const struct sk_buff *skb)
{
    struct tcp_md5sig_pool *hp;
    struct hash_desc *desc;
    const struct tcphdr *th = tcp_hdr(skb);
    __be32 saddr, daddr;

    if (sk) {
        saddr = inet_sk(sk)->inet_saddr;
        daddr = inet_sk(sk)->inet_daddr;
    } else if (req) {
        saddr = inet_rsk(req)->loc_addr;
        daddr = inet_rsk(req)->rmt_addr;
    } else {
        const struct iphdr *iph = ip_hdr(skb);
        saddr = iph->saddr;
        daddr = iph->daddr;
    }

    hp = tcp_get_md5sig_pool();
    if (!hp)
        goto clear_hash_noput;
    desc = &hp->md5_desc;

    if (crypto_hash_init(desc))
        goto clear_hash;

    if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
        goto clear_hash;
    if (tcp_md5_hash_header(hp, th))
        goto clear_hash;
    if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
        goto clear_hash;
    if (tcp_md5_hash_key(hp, key))
        goto clear_hash;
    if (crypto_hash_final(desc, md5_hash))
        goto clear_hash;

    tcp_put_md5sig_pool();
    return 0;

clear_hash:
    tcp_put_md5sig_pool();
clear_hash_noput:
    memset(md5_hash, 0, 16);
    return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
    /*
     * This gets called for each TCP segment that arrives
     * so we want to be efficient.
     * We have 3 drop cases:
     * o No MD5 hash and one expected.
     * o MD5 hash and we're not expecting one.
     * o MD5 hash and it's wrong.
     */
    const __u8 *hash_location = NULL;
    struct tcp_md5sig_key *hash_expected;
    const struct iphdr *iph = ip_hdr(skb);
    const struct tcphdr *th = tcp_hdr(skb);
    int genhash;
    unsigned char newhash[16];

    hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
                      AF_INET);
    hash_location = tcp_parse_md5sig_option(th);

    /* We've parsed the options - do we have a hash? */
    if (!hash_expected && !hash_location)
        return false;

    if (hash_expected && !hash_location) {
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
        return true;
    }

    if (!hash_expected && hash_location) {
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
        return true;
    }

    /* Okay, so this is hash_expected and hash_location -
     * so we need to calculate the checksum.
     */
    genhash = tcp_v4_md5_hash_skb(newhash,
                      hash_expected,
                      NULL, NULL, skb);

    if (genhash || memcmp(hash_location, newhash, 16) != 0) {
        net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
                     &iph->saddr, ntohs(th->source),
                     &iph->daddr, ntohs(th->dest),
                     genhash ? " tcp_v4_calc_md5_hash failed"
                     : "");
        return true;
    }
    return false;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
    .family		=	PF_INET,
    .obj_size	=	sizeof(struct tcp_request_sock),
    .rtx_syn_ack	=	tcp_v4_rtx_synack,
    .send_ack	=	tcp_v4_reqsk_send_ack,
    .destructor	=	tcp_v4_reqsk_destructor,
    .send_reset	=	tcp_v4_send_reset,
    .syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
    .md5_lookup	=	tcp_v4_reqsk_md5_lookup,
    .calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
                   struct request_sock *req,
                   struct tcp_fastopen_cookie *foc,
                   struct tcp_fastopen_cookie *valid_foc)
{
    bool skip_cookie = false;
    struct fastopen_queue *fastopenq;

    if (likely(!fastopen_cookie_present(foc))) {
        /* See include/net/tcp.h for the meaning of these knobs */
        if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
            ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
            (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
            skip_cookie = true; /* no cookie to validate */
        else
            return false;
    }
    fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
    /* A FO option is present; bump the counter. */
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

    /* Make sure the listener has enabled fastopen, and we don't
     * exceed the max # of pending TFO requests allowed before trying
     * to validate the cookie, in order to avoid burning CPU cycles
     * unnecessarily.
     *
     * XXX (TFO) - The implication of checking the max_qlen before
     * processing a cookie request is that clients can't differentiate
     * between qlen overflow causing Fast Open to be disabled
     * temporarily vs a server not supporting Fast Open at all.
     */
    if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
        fastopenq == NULL || fastopenq->max_qlen == 0)
        return false;

    if (fastopenq->qlen >= fastopenq->max_qlen) {
        struct request_sock *req1;
        spin_lock(&fastopenq->lock);
        req1 = fastopenq->rskq_rst_head;
        if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
            spin_unlock(&fastopenq->lock);
            NET_INC_STATS_BH(sock_net(sk),
                     LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
            /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
            foc->len = -1;
            return false;
        }
        fastopenq->rskq_rst_head = req1->dl_next;
        fastopenq->qlen--;
        spin_unlock(&fastopenq->lock);
        reqsk_free(req1);
    }
    if (skip_cookie) {
        tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        return true;
    }
    if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
        if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
            tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
            if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
                memcmp(&foc->val[0], &valid_foc->val[0],
                   TCP_FASTOPEN_COOKIE_SIZE) != 0)
                return false;
            valid_foc->len = -1;
        }
        /* Acknowledge the data received from the peer. */
        tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        return true;
    } else if (foc->len == 0) { /* Client requesting a cookie */
        tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
        NET_INC_STATS_BH(sock_net(sk),
                 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
    } else {
        /* Client sent a cookie with wrong size. Treat it
         * the same as invalid and return a valid one.
         */
        tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
    }
    return false;
}
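
/* Usage note (illustrative, not part of the original source): the cookie
 * dance above serves clients using TCP Fast Open. A minimal client sketch
 * that sends data in the SYN (assumes MSG_FASTOPEN is available, Linux
 * 3.7+; define it as 0x20000000 if the libc headers lack it):
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in dst = { .sin_family = AF_INET,
 *					   .sin_port = htons(80) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *		// SYN carries "hi" plus a cookie request or a cached cookie
 *		sendto(fd, "hi", 2, MSG_FASTOPEN,
 *		       (struct sockaddr *)&dst, sizeof(dst));
 *		close(fd);
 *		return 0;
 *	}
 */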
static int tcp_v4_conn_req_fastopen(struct sock *sk,
                    struct sk_buff *skb,
                    struct sk_buff *skb_synack,
                    struct request_sock *req)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
    const struct inet_request_sock *ireq = inet_rsk(req);
    struct sock *child;
    int err;

    req->num_retrans = 0;
    req->num_timeout = 0;
    req->sk = NULL;

    child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
    if (child == NULL) {
        NET_INC_STATS_BH(sock_net(sk),
                 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        kfree_skb(skb_synack);
        return -1;
    }
    err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
                    ireq->rmt_addr, ireq->opt);
    err = net_xmit_eval(err);
    if (!err)
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
    /* XXX (TFO) - is it ok to ignore error and continue? */

    spin_lock(&queue->fastopenq->lock);
    queue->fastopenq->qlen++;
    spin_unlock(&queue->fastopenq->lock);

    /* Initialize the child socket. Have to fix some values to take
     * into account the child is a Fast Open socket and is created
     * only out of the bits carried in the SYN packet.
     */
    tp = tcp_sk(child);

    tp->fastopen_rsk = req;
    /* Do a hold on the listener sk so that if the listener is being
     * closed, the child that has been accepted can live on and still
     * access listen_lock.
     */
    sock_hold(sk);
    tcp_rsk(req)->listener = sk;

    /* RFC1323: The window in SYN & SYN/ACK segments is never
     * scaled. So correct it appropriately.
     */
    tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

    /* Activate the retrans timer so that SYNACK can be retransmitted.
     * The request socket is not added to the SYN table of the parent
     * because it's been added to the accept queue directly.
     */
    inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                  TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);

    /* Add the child socket directly into the accept queue */
    inet_csk_reqsk_queue_add(sk, req, child);

    /* Now finish processing the fastopen child socket. */
    inet_csk(child)->icsk_af_ops->rebuild_header(child);
    tcp_init_congestion_control(child);
    tcp_mtup_init(child);
    tcp_init_buffer_space(child);
    tcp_init_metrics(child);

    /* Queue the data carried in the SYN packet. We need to first
     * bump skb's refcnt because the caller will attempt to free it.
     *
     * XXX (TFO) - we honor a zero-payload TFO request for now.
     * (Any reason not to?)
     */
    if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
        /* Don't queue the skb if there is no payload in SYN.
         * XXX (TFO) - How about SYN+FIN?
         */
        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
    } else {
        skb = skb_get(skb);
        skb_dst_drop(skb);
        __skb_pull(skb, tcp_hdr(skb)->doff * 4);
        skb_set_owner_r(skb, child);
        __skb_queue_tail(&child->sk_receive_queue, skb);
        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        tp->syn_data_acked = 1;
    }
    sk->sk_data_ready(sk, 0);
    bh_unlock_sock(child);
    sock_put(child);
    WARN_ON(req->sk == NULL);
    return 0;
}
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
    struct tcp_options_received tmp_opt;
    struct request_sock *req;
    struct inet_request_sock *ireq;
    struct tcp_sock *tp = tcp_sk(sk);
    struct dst_entry *dst = NULL;
    __be32 saddr = ip_hdr(skb)->saddr;
    __be32 daddr = ip_hdr(skb)->daddr;
    __u32 isn = TCP_SKB_CB(skb)->when;
    bool want_cookie = false;
    struct flowi4 fl4;
    struct tcp_fastopen_cookie foc = { .len = -1 };
    struct tcp_fastopen_cookie valid_foc = { .len = -1 };
    struct sk_buff *skb_synack;
    int do_fastopen;

    /* Never answer to SYNs sent to broadcast or multicast */
    if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
        goto drop;

    /* TW buckets are converted to open requests without
     * limitations, they conserve resources and peer is
     * evidently real one.
     */
    if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
        want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
        if (!want_cookie)
            goto drop;
    }

    /* Accept backlog is full. If we have already queued enough
     * of warm entries in syn queue, drop request. It is better than
     * clogging syn queue with openreqs with exponentially increasing
     * timeout.
     */
    if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
        goto drop;
    }

    req = inet_reqsk_alloc(&tcp_request_sock_ops);
    if (!req)
        goto drop;

#ifdef CONFIG_TCP_MD5SIG
    tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

    tcp_clear_options(&tmp_opt);
    tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
    tmp_opt.user_mss  = tp->rx_opt.user_mss;
    tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);

    if (want_cookie && !tmp_opt.saw_tstamp)
        tcp_clear_options(&tmp_opt);

    tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
    tcp_openreq_init(req, &tmp_opt, skb);

    ireq = inet_rsk(req);
    ireq->loc_addr = daddr;
    ireq->rmt_addr = saddr;
    ireq->no_srccheck = inet_sk(sk)->transparent;
    ireq->opt = tcp_v4_save_options(skb);
    ireq->ir_mark = inet_request_mark(sk, skb);

    if (security_inet_conn_request(sk, skb, req))
        goto drop_and_free;

    if (!want_cookie || tmp_opt.tstamp_ok)
        TCP_ECN_create_request(req, skb, sock_net(sk));

    if (want_cookie) {
        isn = cookie_v4_init_sequence(sk, skb, &req->mss);
        req->cookie_ts = tmp_opt.tstamp_ok;
    } else if (!isn) {
        /* VJ's idea. We save last timestamp seen
         * from the destination in peer table, when entering
         * state TIME-WAIT, and check against it before
         * accepting new connection request.
         *
         * If "isn" is not zero, this request hit alive
         * timewait bucket, so that all the necessary checks
         * are made in the function processing timewait state.
         */
        if (tmp_opt.saw_tstamp &&
            tcp_death_row.sysctl_tw_recycle &&
            (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
            fl4.daddr == saddr) {
            if (!tcp_peer_is_proven(req, dst, true)) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
                goto drop_and_release;
            }
        }
        /* Kill the following clause, if you dislike this way. */
        else if (!sysctl_tcp_syncookies &&
             (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
              (sysctl_max_syn_backlog >> 2)) &&
             !tcp_peer_is_proven(req, dst, false)) {
            /* Without syncookies last quarter of
             * backlog is filled with destinations,
             * proven to be alive.
             * It means that we continue to communicate
             * to destinations, already remembered
             * to the moment of synflood.
             */
            LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
                       &saddr, ntohs(tcp_hdr(skb)->source));
            goto drop_and_release;
        }

        isn = tcp_v4_init_sequence(skb);
    }
    tcp_rsk(req)->snt_isn = isn;

    if (dst == NULL) {
        dst = inet_csk_route_req(sk, &fl4, req);
        if (dst == NULL)
            goto drop_and_free;
    }
    do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

    /* We don't call tcp_v4_send_synack() directly because we need
     * to make sure a child socket can be created successfully before
     * sending back synack!
     *
     * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
     * (or better yet, call tcp_send_synack() in the child context
     * directly, but will have to fix bunch of other code first)
     * after syn_recv_sock() except one will need to first fix the
     * latter to remove its dependency on the current implementation
     * of tcp_v4_send_synack()->tcp_select_initial_window().
     */
    skb_synack = tcp_make_synack(sk, dst, req,
        fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

    if (skb_synack) {
        __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
        skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
    } else
        goto drop_and_free;

    if (likely(!do_fastopen)) {
        int err;
        err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
                        ireq->rmt_addr, ireq->opt);
        err = net_xmit_eval(err);
        if (err || want_cookie)
            goto drop_and_free;

        tcp_rsk(req)->snt_synack = tcp_time_stamp;
        tcp_rsk(req)->listener = NULL;
        /* Add the request_sock to the SYN table */
        inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        if (fastopen_cookie_present(&foc) && foc.len != 0)
            NET_INC_STATS_BH(sock_net(sk),
                     LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
    } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
        goto drop_and_free;

    return 0;

drop_and_release:
    dst_release(dst);
drop_and_free:
    reqsk_free(req);
drop:
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
    return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
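
/* Usage note (illustrative, not part of the original source): for the
 * Fast Open path above to trigger, the listener must opt in. A sketch
 * of the server-side knob (TCP_FASTOPEN sets the pending-TFO queue depth
 * that tcp_fastopen_check() compares against fastopenq->max_qlen):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	static int enable_tfo(int listen_fd)
 *	{
 *		int qlen = 16;	// max outstanding TFO requests
 *		return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
 *				  &qlen, sizeof(qlen));
 *	}
 */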
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                  struct request_sock *req,
                  struct dst_entry *dst)
{
    struct inet_request_sock *ireq;
    struct inet_sock *newinet;
    struct tcp_sock *newtp;
    struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
    struct tcp_md5sig_key *key;
#endif
    struct ip_options_rcu *inet_opt;

    if (sk_acceptq_is_full(sk))
        goto exit_overflow;

    newsk = tcp_create_openreq_child(sk, req, skb);
    if (!newsk)
        goto exit_nonewsk;

    newsk->sk_gso_type = SKB_GSO_TCPV4;
    inet_sk_rx_dst_set(newsk, skb);

    newtp		      = tcp_sk(newsk);
    newinet		      = inet_sk(newsk);
    ireq		      = inet_rsk(req);
    newinet->inet_daddr   = ireq->rmt_addr;
    newinet->inet_rcv_saddr = ireq->loc_addr;
    newinet->inet_saddr	      = ireq->loc_addr;
    inet_opt	      = ireq->opt;
    rcu_assign_pointer(newinet->inet_opt, inet_opt);
    ireq->opt	      = NULL;
    newinet->mc_index     = inet_iif(skb);
    newinet->mc_ttl	      = ip_hdr(skb)->ttl;
    newinet->rcv_tos      = ip_hdr(skb)->tos;
    inet_csk(newsk)->icsk_ext_hdr_len = 0;
    if (inet_opt)
        inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
    newinet->inet_id = newtp->write_seq ^ jiffies;

    if (!dst) {
        dst = inet_csk_route_child_sock(sk, newsk, req);
        if (!dst)
            goto put_and_exit;
    } else {
        /* syncookie case : see end of cookie_v4_check() */
    }
    sk_setup_caps(newsk, dst);

    tcp_mtup_init(newsk);
    tcp_sync_mss(newsk, dst_mtu(dst));
    newtp->advmss = dst_metric_advmss(dst);
    if (tcp_sk(sk)->rx_opt.user_mss &&
        tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
        newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

    tcp_initialize_rcv_mss(newsk);
    tcp_synack_rtt_meas(newsk, req);
    newtp->total_retrans = req->num_retrans;

#ifdef CONFIG_TCP_MD5SIG
    /* Copy over the MD5 key from the original socket */
    key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
                AF_INET);
    if (key != NULL) {
        /*
         * We're using one, so create a matching key
         * on the newsk structure. If we fail to get
         * memory, then we end up not copying the key
         * across. Shucks.
         */
        tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
                   AF_INET, key->key, key->keylen, GFP_ATOMIC);
        sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
    }
#endif

    if (__inet_inherit_port(sk, newsk) < 0)
        goto put_and_exit;
    __inet_hash_nolisten(newsk, NULL);

    return newsk;

exit_overflow:
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
    dst_release(dst);
exit:
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
    return NULL;
put_and_exit:
    inet_csk_prepare_forced_close(newsk);
    tcp_done(newsk);
    goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
    struct tcphdr *th = tcp_hdr(skb);
    const struct iphdr *iph = ip_hdr(skb);
    struct sock *nsk;
    struct request_sock **prev;
    /* Find possible connection requests. */
    struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
                               iph->saddr, iph->daddr);
    if (req)
        return tcp_check_req(sk, skb, req, prev, false);

    nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
            th->source, iph->daddr, th->dest, inet_iif(skb));

    if (nsk) {
        if (nsk->sk_state != TCP_TIME_WAIT) {
            bh_lock_sock(nsk);
            return nsk;
        }
        inet_twsk_put(inet_twsk(nsk));
        return NULL;
    }

#ifdef CONFIG_SYN_COOKIES
    if (!th->syn)
        sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
    return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
    const struct iphdr *iph = ip_hdr(skb);

    if (skb->ip_summed == CHECKSUM_COMPLETE) {
        if (!tcp_v4_check(skb->len, iph->saddr,
                  iph->daddr, skb->csum)) {
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            return 0;
        }
    }

    skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                       skb->len, IPPROTO_TCP, 0);

    if (skb->len <= 76) {
        return __skb_checksum_complete(skb);
    }
    return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
    struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
    /*
     * We really want to reject the packet as early as possible
     * if:
     *  o We're expecting an MD5'd packet and this is no MD5 tcp option
     *  o There is an MD5 option and we're not expecting one
     */
    if (tcp_v4_inbound_md5_hash(sk, skb))
        goto discard;
#endif

    if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
        struct dst_entry *dst = sk->sk_rx_dst;

        sock_rps_save_rxhash(sk, skb);
        if (dst) {
            if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                dst->ops->check(dst, 0) == NULL) {
                dst_release(dst);
                sk->sk_rx_dst = NULL;
            }
        }
        if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
            rsk = sk;
            goto reset;
        }
        return 0;
    }

    if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
        goto csum_err;

    if (sk->sk_state == TCP_LISTEN) {
        struct sock *nsk = tcp_v4_hnd_req(sk, skb);
        if (!nsk)
            goto discard;

        if (nsk != sk) {
            sock_rps_save_rxhash(nsk, skb);
            if (tcp_child_process(sk, nsk, skb)) {
                rsk = nsk;
                goto reset;
            }
            return 0;
        }
    } else
        sock_rps_save_rxhash(sk, skb);

    if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
        rsk = sk;
        goto reset;
    }
    return 0;

reset:
    tcp_v4_send_reset(rsk, skb);
discard:
    kfree_skb(skb);
    /* Be careful here. If this function gets more complicated and
     * gcc suffers from register pressure on the x86, sk (in %ebx)
     * might be destroyed here. This current version compiles correctly,
     * but you have been warned.
     */
    return 0;

csum_err:
    TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
    TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
    goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
    const struct iphdr *iph;
    const struct tcphdr *th;
    struct sock *sk;

    if (skb->pkt_type != PACKET_HOST)
        return;

    if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
        return;

    iph = ip_hdr(skb);
    th = tcp_hdr(skb);

    if (th->doff < sizeof(struct tcphdr) / 4)
        return;

    sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
                       iph->saddr, th->source,
                       iph->daddr, ntohs(th->dest),
                       skb->skb_iif);
    if (sk) {
        skb->sk = sk;
        skb->destructor = sock_edemux;
        if (sk->sk_state != TCP_TIME_WAIT) {
            struct dst_entry *dst = sk->sk_rx_dst;

            if (dst)
                dst = dst_check(dst, 0);
            if (dst &&
                inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
                skb_dst_set_noref(skb, dst);
        }
    }
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
    struct tcp_sock *tp = tcp_sk(sk);

    if (sysctl_tcp_low_latency || !tp->ucopy.task)
        return false;

    if (skb->len <= tcp_hdrlen(skb) &&
        skb_queue_len(&tp->ucopy.prequeue) == 0)
        return false;

    skb_dst_force(skb);
    __skb_queue_tail(&tp->ucopy.prequeue, skb);
    tp->ucopy.memory += skb->truesize;
    if (tp->ucopy.memory > sk->sk_rcvbuf) {
        struct sk_buff *skb1;

        BUG_ON(sock_owned_by_user(sk));

        while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
            sk_backlog_rcv(sk, skb1);
            NET_INC_STATS_BH(sock_net(sk),
                     LINUX_MIB_TCPPREQUEUEDROPPED);
        }

        tp->ucopy.memory = 0;
    } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
        wake_up_interruptible_sync_poll(sk_sleep(sk),
                       POLLIN | POLLRDNORM | POLLRDBAND);
        if (!inet_csk_ack_scheduled(sk))
            inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                          (3 * tcp_rto_min(sk)) / 4,
                          sysctl_tcp_rto_max);
    }
    return true;
}
EXPORT_SYMBOL(tcp_prequeue);
*skb
)
1964 const struct iphdr
*iph
;
1965 const struct tcphdr
*th
;
1968 struct net
*net
= dev_net(skb
->dev
);
1970 if (skb
->pkt_type
!= PACKET_HOST
)
1973 /* Count it even if it's bad */
1974 TCP_INC_STATS_BH(net
, TCP_MIB_INSEGS
);
1976 if (!pskb_may_pull(skb
, sizeof(struct tcphdr
)))
1981 if (th
->doff
< sizeof(struct tcphdr
) / 4)
1983 if (!pskb_may_pull(skb
, th
->doff
* 4))
1986 /* An explanation is required here, I think.
1987 * Packet length and doff are validated by header prediction,
1988 * provided case of th->doff==0 is eliminated.
1989 * So, we defer the checks. */
1990 if (!skb_csum_unnecessary(skb
) && tcp_v4_checksum_init(skb
))
1995 TCP_SKB_CB(skb
)->seq
= ntohl(th
->seq
);
1996 TCP_SKB_CB(skb
)->end_seq
= (TCP_SKB_CB(skb
)->seq
+ th
->syn
+ th
->fin
+
1997 skb
->len
- th
->doff
* 4);
1998 TCP_SKB_CB(skb
)->ack_seq
= ntohl(th
->ack_seq
);
1999 TCP_SKB_CB(skb
)->when
= 0;
2000 TCP_SKB_CB(skb
)->ip_dsfield
= ipv4_get_dsfield(iph
);
2001 TCP_SKB_CB(skb
)->sacked
= 0;
2003 sk
= __inet_lookup_skb(&tcp_hashinfo
, skb
, th
->source
, th
->dest
);
2008 if (sk
->sk_state
== TCP_TIME_WAIT
)
2011 if (unlikely(iph
->ttl
< inet_sk(sk
)->min_ttl
)) {
2012 NET_INC_STATS_BH(net
, LINUX_MIB_TCPMINTTLDROP
);
2013 goto discard_and_relse
;
2016 if (!xfrm4_policy_check(sk
, XFRM_POLICY_IN
, skb
))
2017 goto discard_and_relse
;
2020 if (sk_filter(sk
, skb
))
2021 goto discard_and_relse
;
2025 bh_lock_sock_nested(sk
);
2027 if (!sock_owned_by_user(sk
)) {
2028 #ifdef CONFIG_NET_DMA
2029 struct tcp_sock
*tp
= tcp_sk(sk
);
2030 if (!tp
->ucopy
.dma_chan
&& tp
->ucopy
.pinned_list
)
2031 tp
->ucopy
.dma_chan
= net_dma_find_channel();
2032 if (tp
->ucopy
.dma_chan
)
2033 ret
= tcp_v4_do_rcv(sk
, skb
);
2037 if (!tcp_prequeue(sk
, skb
))
2038 ret
= tcp_v4_do_rcv(sk
, skb
);
2040 } else if (unlikely(sk_add_backlog(sk
, skb
,
2041 sk
->sk_rcvbuf
+ sk
->sk_sndbuf
))) {
2043 NET_INC_STATS_BH(net
, LINUX_MIB_TCPBACKLOGDROP
);
2044 goto discard_and_relse
;
2053 if (!xfrm4_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
2056 if (skb
->len
< (th
->doff
<< 2) || tcp_checksum_complete(skb
)) {
2058 TCP_INC_STATS_BH(net
, TCP_MIB_CSUMERRORS
);
2060 TCP_INC_STATS_BH(net
, TCP_MIB_INERRS
);
2062 tcp_v4_send_reset(NULL
, skb
);
2066 /* Discard frame. */
2075 if (!xfrm4_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
2076 inet_twsk_put(inet_twsk(sk
));
2080 if (skb
->len
< (th
->doff
<< 2)) {
2081 inet_twsk_put(inet_twsk(sk
));
2084 if (tcp_checksum_complete(skb
)) {
2085 inet_twsk_put(inet_twsk(sk
));
2088 switch (tcp_timewait_state_process(inet_twsk(sk
), skb
, th
)) {
2090 struct sock
*sk2
= inet_lookup_listener(dev_net(skb
->dev
),
2092 iph
->saddr
, th
->source
,
2093 iph
->daddr
, th
->dest
,
2096 inet_twsk_deschedule(inet_twsk(sk
), &tcp_death_row
);
2097 inet_twsk_put(inet_twsk(sk
));
2101 /* Fall through to ACK */
2104 tcp_v4_timewait_ack(sk
, skb
);
2108 case TCP_TW_SUCCESS
:;
static struct timewait_sock_ops tcp_timewait_sock_ops = {
    .twsk_obj_size	= sizeof(struct tcp_timewait_sock),
    .twsk_unique	= tcp_twsk_unique,
    .twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
    struct dst_entry *dst = skb_dst(skb);

    dst_hold(dst);
    sk->sk_rx_dst = dst;
    inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
    .queue_xmit	   = ip_queue_xmit,
    .send_check	   = tcp_v4_send_check,
    .rebuild_header	   = inet_sk_rebuild_header,
    .sk_rx_dst_set	   = inet_sk_rx_dst_set,
    .conn_request	   = tcp_v4_conn_request,
    .syn_recv_sock	   = tcp_v4_syn_recv_sock,
    .net_header_len	   = sizeof(struct iphdr),
    .setsockopt	   = ip_setsockopt,
    .getsockopt	   = ip_getsockopt,
    .addr2sockaddr	   = inet_csk_addr2sockaddr,
    .sockaddr_len	   = sizeof(struct sockaddr_in),
    .bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
    .compat_setsockopt = compat_ip_setsockopt,
    .compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
    .md5_lookup	= tcp_v4_md5_lookup,
    .calc_md5_hash	= tcp_v4_md5_hash_skb,
    .md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
    struct inet_connection_sock *icsk = inet_csk(sk);

    tcp_init_sock(sk);
    icsk->icsk_MMSRB = 0;

    icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
    tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

    return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);

    tcp_clear_xmit_timers(sk);

    tcp_cleanup_congestion_control(sk);

    /* Cleanup up the write buffer. */
    tcp_write_queue_purge(sk);

    /* Cleans up our, hopefully empty, out_of_order_queue. */
    __skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
    /* Clean up the MD5 key list, if any */
    if (tp->md5sig_info) {
        tcp_clear_md5_list(sk);
        kfree_rcu(tp->md5sig_info, rcu);
        tp->md5sig_info = NULL;
    }
#endif

#ifdef CONFIG_NET_DMA
    /* Cleans up our sk_async_wait_queue */
    __skb_queue_purge(&sk->sk_async_wait_queue);
#endif

    /* Clean prequeue, it must be empty really */
    __skb_queue_purge(&tp->ucopy.prequeue);

    /* Clean up a referenced TCP bind bucket. */
    if (inet_csk(sk)->icsk_bind_hash)
        inet_put_port(sk);

    BUG_ON(tp->fastopen_rsk != NULL);

    /* If socket is aborted during connect operation */
    tcp_free_fastopen_req(tp);

    sk_sockets_allocated_dec(sk);
    sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
{
    unsigned int bucket;
    uid_t skuid = (uid_t)(uid_e.appuid);
    struct inet_connection_sock *icsk = NULL;//inet_csk(sk);

    for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
        struct hlist_nulls_node *node;
        struct sock *sk;
        spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);

        spin_lock_bh(lock);
        sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {

            if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
                continue;
            if (sock_flag(sk, SOCK_DEAD))
                continue;

            if (sk->sk_socket) {
                if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
                    continue;
                else
                    printk("[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!",
                        SOCK_INODE(sk->sk_socket)->i_uid);
            } else {
                continue;
            }

            sock_hold(sk);
            spin_unlock_bh(lock);

            local_bh_disable();
            bh_lock_sock(sk);

            // update sk time out value
            icsk = inet_csk(sk);
            printk("[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");

            sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
            icsk->icsk_rto = sysctl_tcp_rto_min * 30;
            icsk->icsk_MMSRB = 1;

            bh_unlock_sock(sk);
            local_bh_enable();
            spin_lock_bh(lock);
            sock_put(sk);
        }
        spin_unlock_bh(lock);
    }
}
/*
 * tcp_v4_nuke_addr_by_uid - destroy all sockets of a special uid
 */
void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
{
	unsigned int bucket;
	uid_t skuid = (uid_t)(uid_e.appuid);

	for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
		struct hlist_nulls_node *node;
		struct sock *sk;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);

restart:
		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {

			if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
				continue;
			if (sock_flag(sk, SOCK_DEAD))
				continue;

			if (sk->sk_socket) {
				if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
					continue;
				else
					printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!",
					       SOCK_INODE(sk->sk_socket)->i_uid);
			} else {
				continue;
			}

			sock_hold(sk);
			spin_unlock_bh(lock);

			local_bh_disable();
			bh_lock_sock(sk);
			sk->sk_err = uid_e.errNum;
			printk(KERN_INFO "SIOCKILLSOCK set sk err == %d!!\n", sk->sk_err);
			sk->sk_error_report(sk);

			tcp_done(sk);
			bh_unlock_sock(sk);
			local_bh_enable();
			sock_put(sk);

			goto restart;
		}
		spin_unlock_bh(lock);
	}
}
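/*
 * Illustrative sketch, not part of this file: a hypothetical caller
 * (e.g. a vendor ioctl handler) would drive the two uid helpers above
 * like this, using the struct uid_err fields referenced in them:
 *
 *	struct uid_err uid_e;
 *
 *	uid_e.appuid = 10099;		// example app uid, hypothetical
 *	uid_e.errNum = ECONNABORTED;	// error delivered to each socket
 *	tcp_v4_reset_connections_by_uid(uid_e);
 *
 * Each matching socket has sk_err set and sk_error_report() invoked, so
 * blocked readers/writers wake up and see the chosen error.
 */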
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}
static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
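/*
 * Illustrative sketch, not part of this file: hlist_nulls chains end in
 * a "nulls" marker value rather than NULL, so end-of-chain is detected
 * with is_a_nulls() instead of a NULL test; a bare walk looks roughly
 * like
 *
 *	struct hlist_nulls_node *pos;
 *
 *	for (pos = head->first; !is_a_nulls(pos); pos = pos->next)
 *		;	// visit the entry containing *pos here
 *
 * which is why tw_next() above must check is_a_nulls() before converting
 * the next pointer back into an inet_timewait_sock.
 */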
/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we dont lock socket, we might find a transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}
int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
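/*
 * Illustrative sketch, not part of this file: tcp_v4_check() above is
 * the IPv4 pseudo-header checksum, i.e. effectively
 *
 *	csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
 *
 * Worked example (hypothetical values): for 127.0.0.1 -> 127.0.0.1 and
 * len 20, the 16-bit pseudo-header words sum to
 *
 *	0x7f00 + 0x0001 + 0x7f00 + 0x0001 + 0x0006 + 0x0014 = 0xfe1c
 *
 * so tcp_v4_check(..., 0) returns the complement 0x01e3, and the
 * "~tcp_v4_check(...)" above stores the plain partial sum 0xfe1c in
 * th->check: exactly the seed that checksum-offload hardware continues
 * from (adding the TCP header/payload words plus end-around carries)
 * before complementing the final result.
 */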
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
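/*
 * Illustrative sketch, not part of this file: tcp_prot is hooked into
 * the socket layer from af_inet.c, roughly
 *
 *	rc = proto_register(&tcp_prot, 1);	// 1: allocate slab caches
 *
 * and the SOCK_STREAM/IPPROTO_TCP inetsw entry points its ->prot here,
 * so the .init/.destroy callbacks above run on every TCP socket's
 * creation and teardown.
 */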
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}