/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
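
/*
 * Illustrative note (not part of the build): both knobs above are
 * exported as sysctls under /proc/sys/net/ipv4/, so e.g. TIME-WAIT
 * reuse for new outgoing connections can be enabled at runtime with:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse
 */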
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
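
/*
 * Worked example for the sequence bump above (illustrative only): if
 * the dying TIME-WAIT socket recorded tw_snd_nxt == 1000, the
 * reincarnated connection starts at
 *
 *	tp->write_seq = 1000 + 65535 + 2;	== 66537
 *
 * i.e. a full 64K window (plus the SYN/FIN sequence slots) beyond
 * anything the old incarnation can have sent, so stray old segments
 * cannot be mistaken for new data.
 */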
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the
	 * hash tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
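
/*
 * Userspace view (illustrative sketch, not part of the kernel build):
 * the path above runs under a plain connect() on an AF_INET stream
 * socket, e.g.:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * 192.0.2.1 is a documentation-only example address.
 */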
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576bytes so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
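
/*
 * Worked example (illustrative): with a cached path-MTU cookie of 1500
 * and a router reporting ICMP_FRAG_NEEDED with mtu == 1400, the test
 * above (icsk_pmtu_cookie > mtu) fires: tcp_sync_mss() shrinks the MSS
 * so segments fit in 1400 bytes, and tcp_simple_retransmit() resends
 * the dropped packet at once instead of waiting for the RTO timer.
 */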
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can f.e. if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
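
/*
 * Example of the "icmp type << 8 | icmp code" encoding documented
 * above (illustrative): a "port unreachable" carries
 * type == ICMP_DEST_UNREACH (3) and code == ICMP_PORT_UNREACH (3),
 * so the combined value is (3 << 8) | 3 == 0x303, and
 * icmp_err_convert[ICMP_PORT_UNREACH].errno maps it to ECONNREFUSED.
 */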
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
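
/*
 * Note on the CHECKSUM_PARTIAL branch above (illustrative): the stack
 * only seeds th->check with the folded pseudo-header sum; csum_start
 * and csum_offset tell the offloading NIC where to write the final
 * checksum.  offsetof(struct tcphdr, check) is 16, so the device
 * stores its result 16 bytes into the TCP header.
 */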
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
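
/*
 * Worked example for the no-ACK branch above (illustrative): a
 * segment with a 20-byte header, 100 bytes of payload, seq == 5000
 * and no SYN/FIN that hits a closed port is answered with rst == 1,
 * ack == 1 and ack_seq == htonl(5000 + 0 + 0 + 120 - 20) ==
 * htonl(5100), i.e. exactly past the offending segment's data.
 */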
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
static void syn_flood_warning(const struct sk_buff *skb)
{
	const char *msg;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		msg = "Sending cookies";
	else
#endif
		msg = "Dropping request";

	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
		ntohs(tcp_hdr(skb)->dest), msg);
}
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}
/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}

		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);
static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}
int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);
static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of key keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
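
/*
 * Userspace sketch (illustrative, not part of the build): the parser
 * above backs the TCP_MD5SIG socket option, e.g. a hypothetical BGP
 * daemon protecting a peering session:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.7", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 for the same address deletes the key,
 * which is the tcp_v4_md5_do_del() path above.
 */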
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->daddr.addr.a4 == saddr) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
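
/*
 * Illustrative numbers for the "last quarter" clause above: with
 * sysctl_max_syn_backlog == 1024 and syncookies disabled, open
 * requests from destinations with no recorded timestamp or RTT are
 * dropped once fewer than 1024 >> 2 == 256 backlog slots remain,
 * reserving the tail of the queue for peers proven alive before.
 */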
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
		goto put_and_exit;

	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
tcp_v4_checksum_init(struct sk_buff
*skb
)
1530 const struct iphdr
*iph
= ip_hdr(skb
);
1532 if (skb
->ip_summed
== CHECKSUM_COMPLETE
) {
1533 if (!tcp_v4_check(skb
->len
, iph
->saddr
,
1534 iph
->daddr
, skb
->csum
)) {
1535 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1540 skb
->csum
= csum_tcpudp_nofold(iph
->saddr
, iph
->daddr
,
1541 skb
->len
, IPPROTO_TCP
, 0);
1543 if (skb
->len
<= 76) {
1544 return __skb_checksum_complete(skb
);
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
{
	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
		peer = inet_getpeer_v4(inet->inet_daddr, 1);
		*release_it = true;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
		*release_it = false;
	}

	return peer;
}
EXPORT_SYMBOL(tcp_v4_get_peer);

void *tcp_v4_tw_get_peer(struct sock *sk)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);

	return inet_getpeer_v4(tw->tw_daddr, 1);
}
EXPORT_SYMBOL(tcp_v4_tw_get_peer);
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v4_tw_get_peer,
};

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_add		= tcp_v4_md5_add_func,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
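
/*
 * Note (illustrative): sysctl_tcp_wmem[1] and sysctl_tcp_rmem[1] used
 * above are the "default" members of the min/default/max triples shown
 * in /proc/sys/net/ipv4/tcp_wmem and tcp_rmem, so freshly created TCP
 * sockets start with those per-socket buffer sizes.
 */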
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}