/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
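
/* Note on the offset above: the reused connection's write_seq starts
 * 65535 + 2 bytes beyond the TIME_WAIT bucket's tw_snd_nxt, so stray
 * segments from the previous incarnation cannot land inside the new
 * connection's sequence space even when PAWS is unavailable.
 */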
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
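
/* Connect path in short: resolve the route (via the source-route first
 * hop when IP options demand it), enter SYN-SENT and hash the socket so
 * a source port is chosen, re-validate the route with the final port
 * pair, pick the ISN, then let tcp_connect() build and send the SYN.
 */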
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
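
/* This runs either directly from tcp_v4_err() when the socket is not
 * owned by user context, or deferred through tcp_release_cb() once the
 * lock owner releases the socket (see the TCP_MTU_REDUCED_DEFERRED bit
 * set in tcp_v4_err() below).
 */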
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
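
/* With abort == false (a soft ICMP error), the request socket is left
 * alone and the SYN-ACK retransmit timer decides its fate; only hard
 * errors drop it from the listener's queue.
 */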
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
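
/* The ICMP_NET_UNREACH/ICMP_HOST_UNREACH handling above implements RTO
 * backoff reversion (draft-zimmermann-tcp-lcd): a fresh unreachable ICMP
 * matching snd_una is taken as evidence that earlier retransmissions were
 * lost to a transient routing failure, so one backoff step is undone and
 * the retransmit timer is re-armed with the remaining time, or fired
 * immediately when none remains.
 */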
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
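
/* For CHECKSUM_PARTIAL, only the pseudo-header sum is seeded here and
 * csum_start/csum_offset tell the device (or skb_checksum_help()) where
 * to fold in the rest; otherwise the full checksum over header and
 * payload is computed in software right away.
 */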
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply based only on parameters
 *		arriving with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net,
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
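
/* Sequence selection for the RST above follows RFC 793: if the offending
 * segment carried an ACK, the RST is sent with SEQ = SEG.ACK and no ACK
 * bit; otherwise it acknowledges exactly the sequence space the segment
 * occupied so the peer can match it against its own state.
 */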
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
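
/* The ACK is built purely from state preserved in the timewait bucket
 * (tw_snd_nxt, tw_rcv_nxt, scaled window, timestamps), since the full
 * tcp_sock that produced that state no longer exists.
 */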
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk) ||
					   lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
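
/* Readers walk md5sig->head under rcu_read_lock() (see
 * tcp_md5_do_lookup() above), which is why insertion uses
 * hlist_add_head_rcu() and the info pointer is published with
 * rcu_assign_pointer() only after the list head is initialised.
 */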
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
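
/* The ESTABLISHED fast path above skips the state machine entirely:
 * after an optimistic rx_dst revalidation the segment goes straight to
 * tcp_rcv_established(); every other state funnels through
 * tcp_rcv_state_process(), with a RST sent on failure.
 */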
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
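
/* The delayed-ACK timer armed above (3/4 of the minimum RTO) bounds how
 * long a prequeued segment can sit unacknowledged while waiting for the
 * reader task to run.
 */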
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
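
/* tcp_v4_rcv() is the protocol entry point: validate and pull headers,
 * populate TCP_SKB_CB(), look the socket up by four-tuple, then either
 * process in softirq context, prequeue for the reader task, or push to
 * the socket backlog when the owner holds the lock.
 */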
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket following cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
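
/* tcp_prot wires the af-independent socket layer to the IPv4 TCP
 * implementation in this file; the IPv6 side provides an analogous
 * tcpv6_prot built on the same shared helpers.
 */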
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	return 0;

fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}