/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
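
/* A minimal user-space sketch of the idea behind secure_tcp_sequence_number():
 * hash the connection 4-tuple together with a boot-time secret, then add a
 * clock component so ISNs keep advancing (RFC 6528 style). The hash and
 * constants below are placeholders, not the kernel's actual algorithm.
 *
 *	static u32 toy_isn(u32 saddr, u32 daddr, u16 sport, u16 dport,
 *			   u32 secret, u32 usec_clock)
 *	{
 *		u32 h = secret;
 *
 *		h = h * 31 + saddr;		// stand-in for a keyed hash
 *		h = h * 31 + daddr;
 *		h = h * 31 + ((u32)sport << 16 | dport);
 *		return h + (usec_clock >> 2);	// clock keeps ISNs monotonic
 *	}
 */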
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
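
/* Why "tw_snd_nxt + 65535 + 2": the reused connection's first sequence
 * number must land beyond anything the old incarnation could still have
 * in flight (at most one 64KB window, plus SYN and FIN, which each consume
 * one sequence number). A sketch of the arithmetic:
 *
 *	u32 new_write_seq = tw_snd_nxt + 65535 + 2;
 *	if (new_write_seq == 0)		// 0 is reserved, see the code above
 *		new_write_seq = 1;
 */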
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
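
/* User-space view of this path (illustrative, real API): connect() on an
 * unbound socket runs tcp_v4_connect(), which picks the source port inside
 * inet_hash_connect(); getsockname() then reveals the chosen identity.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = { .sin_family = AF_INET,
 *				   .sin_port = htons(80) };
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == 0) {
 *		struct sockaddr_in local;
 *		socklen_t len = sizeof(local);
 *		getsockname(fd, (struct sockaddr *)&local, &len);
 *		// local.sin_port now holds the port chosen during connect
 *	}
 */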
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
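
/* The relation tcp_sync_mss() maintains, in first approximation: the MSS is
 * the path MTU minus IP and TCP header overhead (options shrink it further).
 * A sketch, assuming no IP or TCP options:
 *
 *	u32 mtu = 1500;				// e.g. Ethernet
 *	u32 mss = mtu - sizeof(struct iphdr)	// 20 bytes
 *		      - sizeof(struct tcphdr);	// 20 bytes -> mss == 1460
 */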
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *req;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	req = tp->fastopen_rsk;
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		/* XXX (TFO) - revisit the following logic for TFO */

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can f.e. if SYNs crossed,
			       or Fast Open.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
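
/* The backoff revert above in numbers: each retransmission doubles the RTO
 * (rto = base << backoff). When an ICMP proves the segment did reach the
 * network, one doubling is undone and the timer is re-armed with whatever
 * time the reverted RTO still has left. A sketch with made-up values:
 *
 *	u32 base_rto = 200, backoff = 3;	// ms
 *	u32 rto = base_rto << backoff;		// 1600 ms
 *	backoff--;
 *	rto = base_rto << backoff;		// 800 ms after the revert
 *	u32 elapsed = 300;			// ms since the segment was sent
 *	u32 remaining = rto > elapsed ? rto - elapsed : 0;
 *	// remaining == 500: re-arm timer, or retransmit now if it hit 0
 */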
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
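
/* A self-contained user-space sketch of the checksum being filled in above:
 * a 16-bit one's complement sum over the IPv4 pseudo-header followed by the
 * TCP segment, as in RFC 793. csum_tcpudp_nofold()/tcp_v4_check() are
 * optimized versions of this loop. Addresses are taken in host byte order
 * here for simplicity.
 *
 *	static unsigned short toy_tcp_csum(unsigned int saddr, unsigned int daddr,
 *					   const unsigned char *seg, int len)
 *	{
 *		unsigned long sum = 0;
 *		int i;
 *
 *		sum += (saddr >> 16) + (saddr & 0xffff);	// pseudo-header
 *		sum += (daddr >> 16) + (daddr & 0xffff);
 *		sum += IPPROTO_TCP + len;
 *		for (i = 0; i + 1 < len; i += 2)		// segment words
 *			sum += (seg[i] << 8) | seg[i + 1];
 *		if (i < len)					// odd trailing byte
 *			sum += seg[i] << 8;
 *		while (sum >> 16)				// fold carries
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (unsigned short)~sum;
 *	}
 */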
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
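
/* The ack_seq arithmetic above, worked through: a RST answering a segment
 * that carried no ACK must acknowledge everything that segment consumed.
 * SYN and FIN each occupy one unit of sequence space; payload occupies its
 * length in bytes.
 *
 *	u32 seg_seq = 1000, payload = 100;	// example numbers
 *	u32 syn = 0, fin = 1;
 *	u32 ack_seq = seg_seq + syn + fin + payload;	// 1101
 */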
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
	int res = tcp_v4_send_synack(sk, NULL, req, 0, false);

	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
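
/* The syncookie idea in miniature (deliberately simplified; the real
 * encoding lives in net/ipv4/syncookies.c and also packs the MSS and a
 * timestamp): the server keeps no request state and instead derives the
 * SYN-ACK ISN from the connection tuple plus a secret, re-verifying it when
 * the final ACK arrives. toy_hash() below is a hypothetical keyed hash.
 *
 *	u32 cookie = toy_hash(saddr, daddr, sport, dport, secret, counter);
 *	// send SYN-ACK with seq = cookie, and keep nothing
 *	// on ACK: recompute toy_hash(...) and compare with ack_seq - 1
 */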
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
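
/* This is the handler behind the TCP_MD5SIG socket option. User space
 * configures a key per peer address like this (real API; error handling
 * omitted):
 *
 *	struct tcp_md5sig md5;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *	// a zero tcpm_keylen deletes the key, per the code above
 */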
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
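
/* The digest computed above covers, in order: the IPv4 pseudo-header, the
 * base TCP header with its checksum field zeroed (options are not hashed),
 * the payload, and finally the key (RFC 2385 section 2.0). A user-space
 * sketch with a generic MD5 API; md5_init/md5_update/md5_final stand in
 * for whatever library is actually used:
 *
 *	md5_init(&ctx);
 *	md5_update(&ctx, &pseudo_hdr, sizeof(pseudo_hdr));
 *	tcp_hdr_copy = *th;
 *	tcp_hdr_copy.check = 0;			// checksum is excluded
 *	md5_update(&ctx, &tcp_hdr_copy, sizeof(tcp_hdr_copy));
 *	md5_update(&ctx, payload, payload_len);
 *	md5_update(&ctx, key, keylen);		// key goes last
 *	md5_final(&ctx, digest);		// 16-byte result
 */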
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}
#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}
	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
				   TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as invalid and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
	}
	return false;
}
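
/* The user-space counterpart of this server-side logic (real API): the
 * listener opts in with TCP_FASTOPEN, and a client can carry data in the
 * SYN with sendto(MSG_FASTOPEN) instead of connect()+send().
 *
 *	// server: allow up to 16 pending TFO requests
 *	int qlen = 16;
 *	setsockopt(listen_fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *
 *	// client: data rides on the SYN if a valid cookie is cached
 *	sendto(fd, buf, buflen, MSG_FASTOPEN,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */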
static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
				    struct request_sock *req)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
	int err;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		tp->syn_data_acked = 1;
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
					    ireq->rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
		goto drop_and_free;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->num_retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1742 static struct sock
*tcp_v4_hnd_req(struct sock
*sk
, struct sk_buff
*skb
)
1744 struct tcphdr
*th
= tcp_hdr(skb
);
1745 const struct iphdr
*iph
= ip_hdr(skb
);
1747 struct request_sock
**prev
;
1748 /* Find possible connection requests. */
1749 struct request_sock
*req
= inet_csk_search_req(sk
, &prev
, th
->source
,
1750 iph
->saddr
, iph
->daddr
);
1752 return tcp_check_req(sk
, skb
, req
, prev
, false);
1754 nsk
= inet_lookup_established(sock_net(sk
), &tcp_hashinfo
, iph
->saddr
,
1755 th
->source
, iph
->daddr
, th
->dest
, inet_iif(skb
));
1758 if (nsk
->sk_state
!= TCP_TIME_WAIT
) {
1762 inet_twsk_put(inet_twsk(nsk
));
1766 #ifdef CONFIG_SYN_COOKIES
1768 sk
= cookie_v4_check(sk
, skb
, &(IPCB(skb
)->opt
));
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
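
/* The sysctl consulted above is exposed at
 * /proc/sys/net/ipv4/tcp_low_latency; setting it to 1 disables the prequeue
 * and favors latency over cheaper process-context processing. A user-space
 * snippet to inspect it (real file, illustrative code):
 *
 *	char buf[4] = "";
 *	int fd = open("/proc/sys/net/ipv4/tcp_low_latency", O_RDONLY);
 *
 *	if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
 *		printf("tcp_low_latency = %s", buf);	// "0\n" by default
 */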
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
	       list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
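/*
 * The /proc/net/tcp iterator walks, in order: the listening hash
 * (TCP_SEQ_STATE_LISTENING), each listener's SYN table of pending
 * request_socks (TCP_SEQ_STATE_OPENREQ), then the established hash and
 * its TIME_WAIT chain (TCP_SEQ_STATE_ESTABLISHED/TCP_SEQ_STATE_TIME_WAIT).
 * st->bucket, st->sbucket, st->num and st->offset record the position so
 * a sequential reader can resume where the previous read() stopped.
 */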
/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get the first established socket starting from the bucket given in
 * st->bucket.  If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
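/*
 * Note the locking hand-off: established_get_first()/_next() return with
 * the current ehash bucket lock held, and drop it only when moving on to
 * the next bucket.  tcp_seq_stop() therefore needs st->state and
 * st->bucket to know which lock, if any, is still held.
 */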
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
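/*
 * A sequential read of /proc/net/tcp calls start() once per read() chunk.
 * Rather than replaying the whole table from index 0 each time (O(n^2)
 * over a full dump), tcp_seek_last_pos() resumes from the bucket/offset
 * cached by the previous pass, provided *pos still matches st->last_pos.
 */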
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
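/*
 * The three helpers below format one /proc/net/tcp record each, for an
 * embryonic request (SYN_RECV), a full socket, and a TIME_WAIT socket.
 * All three must emit the same column layout, since readers parse the
 * file line by line without knowing which kind of entry they got.
 */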
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
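/*
 * GRO must not aggregate segments whose checksum cannot be verified.
 * With CHECKSUM_COMPLETE the device already summed the packet and
 * tcp_v4_check() only folds in the pseudo-header; with CHECKSUM_NONE the
 * sum is computed here in software.  Anything that fails verification is
 * flushed so the normal receive path can deal with (and drop) it.
 */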
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}
int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
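/*
 * tcp_prot wires TCP into the generic inet socket layer: every socket
 * call on an IPv4 TCP socket ends up in one of these handlers.  The IPv6
 * side defines its own struct proto (tcpv6_prot) but shares most of the
 * underlying implementation through the af_ops indirection above.
 */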
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
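/*
 * Each network namespace keeps one control socket per possible CPU in
 * net->ipv4.tcp_sk.  These are raw kernel sockets used to transmit
 * packets that belong to no user socket, such as the RSTs and ACKs built
 * by tcp_v4_send_reset()/tcp_v4_send_ack(); keeping them per-CPU avoids
 * cross-CPU contention on that transmit path.
 */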
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;

fail:
	tcp_sk_exit(net);

	return res;
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}
static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}