net/ipv4/tcp_ipv4.c @ commit 195c618aba6c939edb8395b02a18a1cbe2f4a9d4 (GitHub/mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24 /*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
97
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
100
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 {
103 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 ip_hdr(skb)->saddr,
105 tcp_hdr(skb)->dest,
106 tcp_hdr(skb)->source);
107 }
108
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
113
114 /* With PAWS, it is safe from the viewpoint
115 of data integrity. Even without PAWS it is safe provided sequence
116 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117
118 Actually, the idea is close to VJ's; only the timestamp cache is
119 held not per host but per port pair, and the TW bucket is used as the
120 state holder.
121
122 If the TW bucket has already been destroyed, we fall back to VJ's scheme
123 and use the initial timestamp retrieved from the peer table.
124 */
125 if (tcptw->tw_ts_recent_stamp &&
126 (twp == NULL || (sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
130 tp->write_seq = 1;
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 sock_hold(sktw);
134 return 1;
135 }
136
137 return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
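
/*
 * Summary of the check above: a new outgoing connection may reuse a port
 * pair still held by a TIME-WAIT bucket when the bucket carries a recent
 * timestamp and either no twp was passed in or sysctl_tcp_tw_reuse is set
 * and at least one second has elapsed since the last timestamp seen from
 * the peer. write_seq is then seeded past the old connection's tw_snd_nxt
 * so that stale segments cannot alias into the new connection. The knob is
 * exposed as /proc/sys/net/ipv4/tcp_tw_reuse.
 */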
140
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 {
144 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145 struct inet_sock *inet = inet_sk(sk);
146 struct tcp_sock *tp = tcp_sk(sk);
147 __be16 orig_sport, orig_dport;
148 __be32 daddr, nexthop;
149 struct flowi4 *fl4;
150 struct rtable *rt;
151 int err;
152 struct ip_options_rcu *inet_opt;
153
154 if (addr_len < sizeof(struct sockaddr_in))
155 return -EINVAL;
156
157 if (usin->sin_family != AF_INET)
158 return -EAFNOSUPPORT;
159
160 nexthop = daddr = usin->sin_addr.s_addr;
161 inet_opt = rcu_dereference_protected(inet->inet_opt,
162 sock_owned_by_user(sk));
163 if (inet_opt && inet_opt->opt.srr) {
164 if (!daddr)
165 return -EINVAL;
166 nexthop = inet_opt->opt.faddr;
167 }
168
169 orig_sport = inet->inet_sport;
170 orig_dport = usin->sin_port;
171 fl4 = &inet->cork.fl.u.ip4;
172 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 IPPROTO_TCP,
175 orig_sport, orig_dport, sk, true);
176 if (IS_ERR(rt)) {
177 err = PTR_ERR(rt);
178 if (err == -ENETUNREACH)
179 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180 return err;
181 }
182
183 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
184 ip_rt_put(rt);
185 return -ENETUNREACH;
186 }
187
188 if (!inet_opt || !inet_opt->opt.srr)
189 daddr = fl4->daddr;
190
191 if (!inet->inet_saddr)
192 inet->inet_saddr = fl4->saddr;
193 inet->inet_rcv_saddr = inet->inet_saddr;
194
195 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
196 /* Reset inherited state */
197 tp->rx_opt.ts_recent = 0;
198 tp->rx_opt.ts_recent_stamp = 0;
199 if (likely(!tp->repair))
200 tp->write_seq = 0;
201 }
202
203 if (tcp_death_row.sysctl_tw_recycle &&
204 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
205 tcp_fetch_timewait_stamp(sk, &rt->dst);
206
207 inet->inet_dport = usin->sin_port;
208 inet->inet_daddr = daddr;
209
210 inet_csk(sk)->icsk_ext_hdr_len = 0;
211 if (inet_opt)
212 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
213
214 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
215
216 /* Socket identity is still unknown (sport may be zero).
217 * However we set state to SYN-SENT and, without releasing the socket
218 * lock, select a source port, enter ourselves into the hash tables and
219 * complete initialization after this.
220 */
221 tcp_set_state(sk, TCP_SYN_SENT);
222 err = inet_hash_connect(&tcp_death_row, sk);
223 if (err)
224 goto failure;
225
226 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
227 inet->inet_sport, inet->inet_dport, sk);
228 if (IS_ERR(rt)) {
229 err = PTR_ERR(rt);
230 rt = NULL;
231 goto failure;
232 }
233 /* OK, now commit destination to socket. */
234 sk->sk_gso_type = SKB_GSO_TCPV4;
235 sk_setup_caps(sk, &rt->dst);
236
237 if (!tp->write_seq && likely(!tp->repair))
238 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
239 inet->inet_daddr,
240 inet->inet_sport,
241 usin->sin_port);
242
243 inet->inet_id = tp->write_seq ^ jiffies;
244
245 err = tcp_connect(sk);
246
247 rt = NULL;
248 if (err)
249 goto failure;
250
251 return 0;
252
253 failure:
254 /*
255 * This unhashes the socket and releases the local port,
256 * if necessary.
257 */
258 tcp_set_state(sk, TCP_CLOSE);
259 ip_rt_put(rt);
260 sk->sk_route_caps = 0;
261 inet->inet_dport = 0;
262 return err;
263 }
264 EXPORT_SYMBOL(tcp_v4_connect);
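
/*
 * The connect path above, in short: resolve a route to the destination
 * (honouring any source-routing option), pick a source address if the
 * socket has none, move to SYN-SENT, let inet_hash_connect() choose an
 * ephemeral port and insert the socket into the hash tables, re-validate
 * the route with the final ports, pick an initial sequence number unless
 * one was preset (repair mode), and hand off to tcp_connect() to build and
 * transmit the SYN. Any failure after the state change unwinds to
 * TCP_CLOSE and clears the cached route capabilities.
 */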
265
266 /*
267 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
268 * It can be called through tcp_release_cb() if socket was owned by user
269 * at the time tcp_v4_err() was called to handle ICMP message.
270 */
271 void tcp_v4_mtu_reduced(struct sock *sk)
272 {
273 struct dst_entry *dst;
274 struct inet_sock *inet = inet_sk(sk);
275 u32 mtu = tcp_sk(sk)->mtu_info;
276
277 dst = inet_csk_update_pmtu(sk, mtu);
278 if (!dst)
279 return;
280
281 /* Something is about to go wrong... Remember the soft error
282 * in case this connection is not able to recover.
283 */
284 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
285 sk->sk_err_soft = EMSGSIZE;
286
287 mtu = dst_mtu(dst);
288
289 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
290 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
291 tcp_sync_mss(sk, mtu);
292
293 /* Resend the TCP packet because it's
294 * clear that the old packet has been
295 * dropped. This is the new "fast" path mtu
296 * discovery.
297 */
298 tcp_simple_retransmit(sk);
299 } /* else let the usual retransmit timer handle it */
300 }
301 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
302
303 static void do_redirect(struct sk_buff *skb, struct sock *sk)
304 {
305 struct dst_entry *dst = __sk_dst_check(sk, 0);
306
307 if (dst)
308 dst->ops->redirect(dst, sk, skb);
309 }
310
311 /*
312 * This routine is called by the ICMP module when it gets some
313 * sort of error condition. If err < 0 then the socket should
314 * be closed and the error returned to the user. If err > 0
315 * it's just the icmp type << 8 | icmp code. After adjustment
316 * header points to the first 8 bytes of the tcp header. We need
317 * to find the appropriate port.
318 *
319 * The locking strategy used here is very "optimistic". When
320 * someone else accesses the socket the ICMP is just dropped
321 * and for some paths there is no check at all.
322 * A more general error queue to queue errors for later handling
323 * is probably better.
324 *
325 */
326
327 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
328 {
329 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
330 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
331 struct inet_connection_sock *icsk;
332 struct tcp_sock *tp;
333 struct inet_sock *inet;
334 const int type = icmp_hdr(icmp_skb)->type;
335 const int code = icmp_hdr(icmp_skb)->code;
336 struct sock *sk;
337 struct sk_buff *skb;
338 struct request_sock *req;
339 __u32 seq;
340 __u32 remaining;
341 int err;
342 struct net *net = dev_net(icmp_skb->dev);
343
344 if (icmp_skb->len < (iph->ihl << 2) + 8) {
345 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
346 return;
347 }
348
349 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
350 iph->saddr, th->source, inet_iif(icmp_skb));
351 if (!sk) {
352 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
353 return;
354 }
355 if (sk->sk_state == TCP_TIME_WAIT) {
356 inet_twsk_put(inet_twsk(sk));
357 return;
358 }
359
360 bh_lock_sock(sk);
361 /* If too many ICMPs get dropped on busy
362 * servers this needs to be solved differently.
363 * We do take care of the PMTU discovery (RFC1191) special case:
364 * we can receive locally generated ICMP messages while socket is held.
365 */
366 if (sock_owned_by_user(sk)) {
367 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
368 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
369 }
370 if (sk->sk_state == TCP_CLOSE)
371 goto out;
372
373 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
375 goto out;
376 }
377
378 icsk = inet_csk(sk);
379 tp = tcp_sk(sk);
380 req = tp->fastopen_rsk;
381 seq = ntohl(th->seq);
382 if (sk->sk_state != TCP_LISTEN &&
383 !between(seq, tp->snd_una, tp->snd_nxt) &&
384 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
385 /* For a Fast Open socket, allow seq to be snt_isn. */
386 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
387 goto out;
388 }
389
390 switch (type) {
391 case ICMP_REDIRECT:
392 if (!sock_owned_by_user(sk))
393 do_redirect(icmp_skb, sk);
394 goto out;
395 case ICMP_SOURCE_QUENCH:
396 /* Just silently ignore these. */
397 goto out;
398 case ICMP_PARAMETERPROB:
399 err = EPROTO;
400 break;
401 case ICMP_DEST_UNREACH:
402 if (code > NR_ICMP_UNREACH)
403 goto out;
404
405 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
406 /* We are not interested in TCP_LISTEN and open_requests
407 * (SYN-ACKs sent out by Linux are always < 576 bytes so
408 * they should go through unfragmented).
409 */
410 if (sk->sk_state == TCP_LISTEN)
411 goto out;
412
413 tp->mtu_info = info;
414 if (!sock_owned_by_user(sk)) {
415 tcp_v4_mtu_reduced(sk);
416 } else {
417 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
418 sock_hold(sk);
419 }
420 goto out;
421 }
422
423 err = icmp_err_convert[code].errno;
424 /* check if icmp_skb allows revert of backoff
425 * (see draft-zimmermann-tcp-lcd) */
426 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
427 break;
428 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
429 !icsk->icsk_backoff)
430 break;
431
432 /* XXX (TFO) - revisit the following logic for TFO */
433
434 if (sock_owned_by_user(sk))
435 break;
436
437 icsk->icsk_backoff--;
438 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
439 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
440 tcp_bound_rto(sk);
441
442 skb = tcp_write_queue_head(sk);
443 BUG_ON(!skb);
444
445 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
446 tcp_time_stamp - TCP_SKB_CB(skb)->when);
447
448 if (remaining) {
449 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
450 remaining, TCP_RTO_MAX);
451 } else {
452 /* RTO revert clocked out retransmission.
453 * Will retransmit now */
454 tcp_retransmit_timer(sk);
455 }
456
457 break;
458 case ICMP_TIME_EXCEEDED:
459 err = EHOSTUNREACH;
460 break;
461 default:
462 goto out;
463 }
464
465 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
466 * than following the TCP_SYN_RECV case and closing the socket,
467 * we ignore the ICMP error and keep trying like a fully established
468 * socket. Is this the right thing to do?
469 */
470 if (req && req->sk == NULL)
471 goto out;
472
473 switch (sk->sk_state) {
474 struct request_sock *req, **prev;
475 case TCP_LISTEN:
476 if (sock_owned_by_user(sk))
477 goto out;
478
479 req = inet_csk_search_req(sk, &prev, th->dest,
480 iph->daddr, iph->saddr);
481 if (!req)
482 goto out;
483
484 /* ICMPs are not backlogged, hence we cannot get
485 an established socket here.
486 */
487 WARN_ON(req->sk);
488
489 if (seq != tcp_rsk(req)->snt_isn) {
490 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
491 goto out;
492 }
493
494 /*
495 * Still in SYN_RECV, just remove it silently.
496 * There is no good way to pass the error to the newly
497 * created socket, and POSIX does not want network
498 * errors returned from accept().
499 */
500 inet_csk_reqsk_queue_drop(sk, req, prev);
501 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
502 goto out;
503
504 case TCP_SYN_SENT:
505 case TCP_SYN_RECV: /* Cannot normally happen.
506 It can, e.g., if SYNs crossed,
507 or with Fast Open.
508 */
509 if (!sock_owned_by_user(sk)) {
510 sk->sk_err = err;
511
512 sk->sk_error_report(sk);
513
514 tcp_done(sk);
515 } else {
516 sk->sk_err_soft = err;
517 }
518 goto out;
519 }
520
521 /* If we've already connected we will keep trying
522 * until we time out, or the user gives up.
523 *
524 * rfc1122 4.2.3.9 allows us to consider as hard errors
525 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
526 * but it is obsoleted by pmtu discovery).
527 *
528 * Note that in the modern internet, where routing is unreliable
529 * and broken firewalls sit in every dark corner sending random
530 * errors ordered by their masters, even these two messages finally lose
531 * their original sense (even Linux sends invalid PORT_UNREACHs).
532 *
533 * Now we are in compliance with RFCs.
534 * --ANK (980905)
535 */
536
537 inet = inet_sk(sk);
538 if (!sock_owned_by_user(sk) && inet->recverr) {
539 sk->sk_err = err;
540 sk->sk_error_report(sk);
541 } else { /* Only an error on timeout */
542 sk->sk_err_soft = err;
543 }
544
545 out:
546 bh_unlock_sock(sk);
547 sock_put(sk);
548 }
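
/*
 * The backoff revert above implements draft-zimmermann-tcp-lcd: a
 * NET_UNREACH/HOST_UNREACH ICMP matching the head of the write queue is
 * taken as evidence that the path is usable again, so one level of
 * exponential backoff is undone instead of waiting for the full timer.
 * Worked example (values illustrative only): with a base RTO of 200 ms and
 * icsk_backoff 3, the pending timer was 200 ms << 3 = 1.6 s; after the
 * ICMP, backoff drops to 2 and the new RTO is 200 ms << 2 = 800 ms. If
 * more than 800 ms have already elapsed since the segment was stamped, the
 * code retransmits immediately; otherwise it re-arms the timer for the
 * remainder.
 */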
549
550 static void __tcp_v4_send_check(struct sk_buff *skb,
551 __be32 saddr, __be32 daddr)
552 {
553 struct tcphdr *th = tcp_hdr(skb);
554
555 if (skb->ip_summed == CHECKSUM_PARTIAL) {
556 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
557 skb->csum_start = skb_transport_header(skb) - skb->head;
558 skb->csum_offset = offsetof(struct tcphdr, check);
559 } else {
560 th->check = tcp_v4_check(skb->len, saddr, daddr,
561 csum_partial(th,
562 th->doff << 2,
563 skb->csum));
564 }
565 }
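
/*
 * Note on the two branches above: with CHECKSUM_PARTIAL only the
 * pseudo-header sum is written into th->check, and csum_start/csum_offset
 * tell the device (or skb_checksum_help() as a software fallback) where to
 * fold in the final checksum over the TCP header and payload. Otherwise
 * the full checksum is computed here in software.
 */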
566
567 /* This routine computes an IPv4 TCP checksum. */
568 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
569 {
570 const struct inet_sock *inet = inet_sk(sk);
571
572 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
573 }
574 EXPORT_SYMBOL(tcp_v4_send_check);
575
576 int tcp_v4_gso_send_check(struct sk_buff *skb)
577 {
578 const struct iphdr *iph;
579 struct tcphdr *th;
580
581 if (!pskb_may_pull(skb, sizeof(*th)))
582 return -EINVAL;
583
584 iph = ip_hdr(skb);
585 th = tcp_hdr(skb);
586
587 th->check = 0;
588 skb->ip_summed = CHECKSUM_PARTIAL;
589 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
590 return 0;
591 }
592
593 /*
594 * This routine will send an RST to the other tcp.
595 *
596 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
597 * for the reset?
598 * Answer: if a packet caused an RST, it is not for a socket
599 * existing in our system; if it is matched to a socket,
600 * it is just a duplicate segment or a bug in the other side's TCP.
601 * So we build the reply based only on the parameters
602 * that arrived with the segment.
603 * Exception: precedence violation. We do not implement it in any case.
604 */
605
606 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
607 {
608 const struct tcphdr *th = tcp_hdr(skb);
609 struct {
610 struct tcphdr th;
611 #ifdef CONFIG_TCP_MD5SIG
612 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
613 #endif
614 } rep;
615 struct ip_reply_arg arg;
616 #ifdef CONFIG_TCP_MD5SIG
617 struct tcp_md5sig_key *key;
618 const __u8 *hash_location = NULL;
619 unsigned char newhash[16];
620 int genhash;
621 struct sock *sk1 = NULL;
622 #endif
623 struct net *net;
624
625 /* Never send a reset in response to a reset. */
626 if (th->rst)
627 return;
628
629 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
630 return;
631
632 /* Swap the send and the receive. */
633 memset(&rep, 0, sizeof(rep));
634 rep.th.dest = th->source;
635 rep.th.source = th->dest;
636 rep.th.doff = sizeof(struct tcphdr) / 4;
637 rep.th.rst = 1;
638
639 if (th->ack) {
640 rep.th.seq = th->ack_seq;
641 } else {
642 rep.th.ack = 1;
643 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
644 skb->len - (th->doff << 2));
645 }
646
647 memset(&arg, 0, sizeof(arg));
648 arg.iov[0].iov_base = (unsigned char *)&rep;
649 arg.iov[0].iov_len = sizeof(rep.th);
650
651 #ifdef CONFIG_TCP_MD5SIG
652 hash_location = tcp_parse_md5sig_option(th);
653 if (!sk && hash_location) {
654 /*
655 * The active side is lost. Try to find the listening socket through
656 * the source port, and then find the md5 key through the listening socket.
657 * We do not loosen security here:
658 * the incoming packet's md5 option is verified with the found key;
659 * no RST is generated if the md5 hash doesn't match.
660 */
661 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
662 &tcp_hashinfo, ip_hdr(skb)->saddr,
663 th->source, ip_hdr(skb)->daddr,
664 ntohs(th->source), inet_iif(skb));
665 /* don't send an rst if we can't find a key */
666 if (!sk1)
667 return;
668 rcu_read_lock();
669 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
670 &ip_hdr(skb)->saddr, AF_INET);
671 if (!key)
672 goto release_sk1;
673
674 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
675 if (genhash || memcmp(hash_location, newhash, 16) != 0)
676 goto release_sk1;
677 } else {
678 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
679 &ip_hdr(skb)->saddr,
680 AF_INET) : NULL;
681 }
682
683 if (key) {
684 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
685 (TCPOPT_NOP << 16) |
686 (TCPOPT_MD5SIG << 8) |
687 TCPOLEN_MD5SIG);
688 /* Update length and the length the header thinks exists */
689 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
690 rep.th.doff = arg.iov[0].iov_len / 4;
691
692 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
693 key, ip_hdr(skb)->saddr,
694 ip_hdr(skb)->daddr, &rep.th);
695 }
696 #endif
697 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
698 ip_hdr(skb)->saddr, /* XXX */
699 arg.iov[0].iov_len, IPPROTO_TCP, 0);
700 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
701 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
702 /* When the socket is gone, all binding information is lost and
703 * routing might fail in this case. No choice here: if we choose to force the
704 * input interface, we will misroute in the case of an asymmetric route.
705 */
706 if (sk)
707 arg.bound_dev_if = sk->sk_bound_dev_if;
708
709 net = dev_net(skb_dst(skb)->dev);
710 arg.tos = ip_hdr(skb)->tos;
711 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
712 skb, ip_hdr(skb)->saddr,
713 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
714
715 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
716 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
717
718 #ifdef CONFIG_TCP_MD5SIG
719 release_sk1:
720 if (sk1) {
721 rcu_read_unlock();
722 sock_put(sk1);
723 }
724 #endif
725 }
726
727 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
728 outside of socket context, is certainly ugly. What can I do?
729 */
730
731 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
732 u32 win, u32 tsval, u32 tsecr, int oif,
733 struct tcp_md5sig_key *key,
734 int reply_flags, u8 tos)
735 {
736 const struct tcphdr *th = tcp_hdr(skb);
737 struct {
738 struct tcphdr th;
739 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
740 #ifdef CONFIG_TCP_MD5SIG
741 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
742 #endif
743 ];
744 } rep;
745 struct ip_reply_arg arg;
746 struct net *net = dev_net(skb_dst(skb)->dev);
747
748 memset(&rep.th, 0, sizeof(struct tcphdr));
749 memset(&arg, 0, sizeof(arg));
750
751 arg.iov[0].iov_base = (unsigned char *)&rep;
752 arg.iov[0].iov_len = sizeof(rep.th);
753 if (tsecr) {
754 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
755 (TCPOPT_TIMESTAMP << 8) |
756 TCPOLEN_TIMESTAMP);
757 rep.opt[1] = htonl(tsval);
758 rep.opt[2] = htonl(tsecr);
759 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
760 }
761
762 /* Swap the send and the receive. */
763 rep.th.dest = th->source;
764 rep.th.source = th->dest;
765 rep.th.doff = arg.iov[0].iov_len / 4;
766 rep.th.seq = htonl(seq);
767 rep.th.ack_seq = htonl(ack);
768 rep.th.ack = 1;
769 rep.th.window = htons(win);
770
771 #ifdef CONFIG_TCP_MD5SIG
772 if (key) {
773 int offset = (tsecr) ? 3 : 0;
774
775 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
776 (TCPOPT_NOP << 16) |
777 (TCPOPT_MD5SIG << 8) |
778 TCPOLEN_MD5SIG);
779 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
780 rep.th.doff = arg.iov[0].iov_len/4;
781
782 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
783 key, ip_hdr(skb)->saddr,
784 ip_hdr(skb)->daddr, &rep.th);
785 }
786 #endif
787 arg.flags = reply_flags;
788 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
789 ip_hdr(skb)->saddr, /* XXX */
790 arg.iov[0].iov_len, IPPROTO_TCP, 0);
791 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
792 if (oif)
793 arg.bound_dev_if = oif;
794 arg.tos = tos;
795 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
796 skb, ip_hdr(skb)->saddr,
797 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
798
799 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
800 }
801
802 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
803 {
804 struct inet_timewait_sock *tw = inet_twsk(sk);
805 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
806
807 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
808 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
809 tcp_time_stamp + tcptw->tw_ts_offset,
810 tcptw->tw_ts_recent,
811 tw->tw_bound_dev_if,
812 tcp_twsk_md5_key(tcptw),
813 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
814 tw->tw_tos
815 );
816
817 inet_twsk_put(tw);
818 }
819
820 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
821 struct request_sock *req)
822 {
823 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
824 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
825 */
826 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
827 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
828 tcp_rsk(req)->rcv_nxt,
829 req->rcv_wnd >> inet_rsk(req)->rcv_wscale,
830 tcp_time_stamp,
831 req->ts_recent,
832 0,
833 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
834 AF_INET),
835 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
836 ip_hdr(skb)->tos);
837 }
838
839 /*
840 * Send a SYN-ACK after having received a SYN.
841 * This still operates on a request_sock only, not on a big
842 * socket.
843 */
844 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
845 struct request_sock *req,
846 u16 queue_mapping,
847 bool nocache)
848 {
849 const struct inet_request_sock *ireq = inet_rsk(req);
850 struct flowi4 fl4;
851 int err = -1;
852 struct sk_buff * skb;
853
854 /* First, grab a route. */
855 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
856 return -1;
857
858 skb = tcp_make_synack(sk, dst, req, NULL);
859
860 if (skb) {
861 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
862
863 skb_set_queue_mapping(skb, queue_mapping);
864 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
865 ireq->rmt_addr,
866 ireq->opt);
867 err = net_xmit_eval(err);
868 if (!tcp_rsk(req)->snt_synack && !err)
869 tcp_rsk(req)->snt_synack = tcp_time_stamp;
870 }
871
872 return err;
873 }
874
875 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
876 {
877 int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
878
879 if (!res)
880 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
881 return res;
882 }
883
884 /*
885 * IPv4 request_sock destructor.
886 */
887 static void tcp_v4_reqsk_destructor(struct request_sock *req)
888 {
889 kfree(inet_rsk(req)->opt);
890 }
891
892 /*
893 * Return true if a syncookie should be sent
894 */
895 bool tcp_syn_flood_action(struct sock *sk,
896 const struct sk_buff *skb,
897 const char *proto)
898 {
899 const char *msg = "Dropping request";
900 bool want_cookie = false;
901 struct listen_sock *lopt;
902
903
904
905 #ifdef CONFIG_SYN_COOKIES
906 if (sysctl_tcp_syncookies) {
907 msg = "Sending cookies";
908 want_cookie = true;
909 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
910 } else
911 #endif
912 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
913
914 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
915 if (!lopt->synflood_warned) {
916 lopt->synflood_warned = 1;
917 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
918 proto, ntohs(tcp_hdr(skb)->dest), msg);
919 }
920 return want_cookie;
921 }
922 EXPORT_SYMBOL(tcp_syn_flood_action);
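
/*
 * The decision above is driven by /proc/sys/net/ipv4/tcp_syncookies: with
 * CONFIG_SYN_COOKIES compiled in and the sysctl enabled, an overflowing
 * listen queue is answered with a cookie-encoded sequence number instead
 * of storing a request_sock, and the flood warning fires at most once per
 * listening socket.
 */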
923
924 /*
925 * Save and compile IPv4 options into the request_sock if needed.
926 */
927 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
928 {
929 const struct ip_options *opt = &(IPCB(skb)->opt);
930 struct ip_options_rcu *dopt = NULL;
931
932 if (opt && opt->optlen) {
933 int opt_size = sizeof(*dopt) + opt->optlen;
934
935 dopt = kmalloc(opt_size, GFP_ATOMIC);
936 if (dopt) {
937 if (ip_options_echo(&dopt->opt, skb)) {
938 kfree(dopt);
939 dopt = NULL;
940 }
941 }
942 }
943 return dopt;
944 }
945
946 #ifdef CONFIG_TCP_MD5SIG
947 /*
948 * RFC2385 MD5 checksumming requires a mapping of
949 * IP address->MD5 Key.
950 * We need to maintain these in the sk structure.
951 */
952
953 /* Find the Key structure for an address. */
954 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
955 const union tcp_md5_addr *addr,
956 int family)
957 {
958 struct tcp_sock *tp = tcp_sk(sk);
959 struct tcp_md5sig_key *key;
960 unsigned int size = sizeof(struct in_addr);
961 struct tcp_md5sig_info *md5sig;
962
963 /* caller either holds rcu_read_lock() or socket lock */
964 md5sig = rcu_dereference_check(tp->md5sig_info,
965 sock_owned_by_user(sk) ||
966 lockdep_is_held(&sk->sk_lock.slock));
967 if (!md5sig)
968 return NULL;
969 #if IS_ENABLED(CONFIG_IPV6)
970 if (family == AF_INET6)
971 size = sizeof(struct in6_addr);
972 #endif
973 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
974 if (key->family != family)
975 continue;
976 if (!memcmp(&key->addr, addr, size))
977 return key;
978 }
979 return NULL;
980 }
981 EXPORT_SYMBOL(tcp_md5_do_lookup);
982
983 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
984 struct sock *addr_sk)
985 {
986 union tcp_md5_addr *addr;
987
988 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
989 return tcp_md5_do_lookup(sk, addr, AF_INET);
990 }
991 EXPORT_SYMBOL(tcp_v4_md5_lookup);
992
993 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
994 struct request_sock *req)
995 {
996 union tcp_md5_addr *addr;
997
998 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
999 return tcp_md5_do_lookup(sk, addr, AF_INET);
1000 }
1001
1002 /* This can be called on a newly created socket, from other files */
1003 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1004 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1005 {
1006 /* Add Key to the list */
1007 struct tcp_md5sig_key *key;
1008 struct tcp_sock *tp = tcp_sk(sk);
1009 struct tcp_md5sig_info *md5sig;
1010
1011 key = tcp_md5_do_lookup(sk, addr, family);
1012 if (key) {
1013 /* Pre-existing entry - just update that one. */
1014 memcpy(key->key, newkey, newkeylen);
1015 key->keylen = newkeylen;
1016 return 0;
1017 }
1018
1019 md5sig = rcu_dereference_protected(tp->md5sig_info,
1020 sock_owned_by_user(sk) ||
1021 lockdep_is_held(&sk->sk_lock.slock));
1022 if (!md5sig) {
1023 md5sig = kmalloc(sizeof(*md5sig), gfp);
1024 if (!md5sig)
1025 return -ENOMEM;
1026
1027 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1028 INIT_HLIST_HEAD(&md5sig->head);
1029 rcu_assign_pointer(tp->md5sig_info, md5sig);
1030 }
1031
1032 key = sock_kmalloc(sk, sizeof(*key), gfp);
1033 if (!key)
1034 return -ENOMEM;
1035 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1036 sock_kfree_s(sk, key, sizeof(*key));
1037 return -ENOMEM;
1038 }
1039
1040 memcpy(key->key, newkey, newkeylen);
1041 key->keylen = newkeylen;
1042 key->family = family;
1043 memcpy(&key->addr, addr,
1044 (family == AF_INET6) ? sizeof(struct in6_addr) :
1045 sizeof(struct in_addr));
1046 hlist_add_head_rcu(&key->node, &md5sig->head);
1047 return 0;
1048 }
1049 EXPORT_SYMBOL(tcp_md5_do_add);
1050
1051 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1052 {
1053 struct tcp_sock *tp = tcp_sk(sk);
1054 struct tcp_md5sig_key *key;
1055 struct tcp_md5sig_info *md5sig;
1056
1057 key = tcp_md5_do_lookup(sk, addr, family);
1058 if (!key)
1059 return -ENOENT;
1060 hlist_del_rcu(&key->node);
1061 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1062 kfree_rcu(key, rcu);
1063 md5sig = rcu_dereference_protected(tp->md5sig_info,
1064 sock_owned_by_user(sk));
1065 if (hlist_empty(&md5sig->head))
1066 tcp_free_md5sig_pool();
1067 return 0;
1068 }
1069 EXPORT_SYMBOL(tcp_md5_do_del);
1070
1071 static void tcp_clear_md5_list(struct sock *sk)
1072 {
1073 struct tcp_sock *tp = tcp_sk(sk);
1074 struct tcp_md5sig_key *key;
1075 struct hlist_node *n;
1076 struct tcp_md5sig_info *md5sig;
1077
1078 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1079
1080 if (!hlist_empty(&md5sig->head))
1081 tcp_free_md5sig_pool();
1082 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1083 hlist_del_rcu(&key->node);
1084 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1085 kfree_rcu(key, rcu);
1086 }
1087 }
1088
1089 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1090 int optlen)
1091 {
1092 struct tcp_md5sig cmd;
1093 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1094
1095 if (optlen < sizeof(cmd))
1096 return -EINVAL;
1097
1098 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1099 return -EFAULT;
1100
1101 if (sin->sin_family != AF_INET)
1102 return -EINVAL;
1103
1104 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1105 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1106 AF_INET);
1107
1108 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1109 return -EINVAL;
1110
1111 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1112 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1113 GFP_KERNEL);
1114 }
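
/*
 * For reference, the interface parsed above is the standard TCP_MD5SIG
 * socket option. A minimal userspace sketch (illustrative only; fd,
 * peer_addr, key and key_len are placeholders, not names from this file):
 *
 *	struct tcp_md5sig md5;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = peer_addr;
 *	md5.tcpm_keylen = key_len;		-- at most TCP_MD5SIG_MAXKEYLEN
 *	memcpy(md5.tcpm_key, key, key_len);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing a zero tcpm_keylen deletes the key for that address, as handled
 * by the tcp_md5_do_del() branch above.
 */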
1115
1116 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1117 __be32 daddr, __be32 saddr, int nbytes)
1118 {
1119 struct tcp4_pseudohdr *bp;
1120 struct scatterlist sg;
1121
1122 bp = &hp->md5_blk.ip4;
1123
1124 /*
1125 * 1. the TCP pseudo-header (in the order: source IP address,
1126 * destination IP address, zero-padded protocol number, and
1127 * segment length)
1128 */
1129 bp->saddr = saddr;
1130 bp->daddr = daddr;
1131 bp->pad = 0;
1132 bp->protocol = IPPROTO_TCP;
1133 bp->len = cpu_to_be16(nbytes);
1134
1135 sg_init_one(&sg, bp, sizeof(*bp));
1136 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1137 }
1138
1139 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1140 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1141 {
1142 struct tcp_md5sig_pool *hp;
1143 struct hash_desc *desc;
1144
1145 hp = tcp_get_md5sig_pool();
1146 if (!hp)
1147 goto clear_hash_noput;
1148 desc = &hp->md5_desc;
1149
1150 if (crypto_hash_init(desc))
1151 goto clear_hash;
1152 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1153 goto clear_hash;
1154 if (tcp_md5_hash_header(hp, th))
1155 goto clear_hash;
1156 if (tcp_md5_hash_key(hp, key))
1157 goto clear_hash;
1158 if (crypto_hash_final(desc, md5_hash))
1159 goto clear_hash;
1160
1161 tcp_put_md5sig_pool();
1162 return 0;
1163
1164 clear_hash:
1165 tcp_put_md5sig_pool();
1166 clear_hash_noput:
1167 memset(md5_hash, 0, 16);
1168 return 1;
1169 }
1170
1171 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1172 const struct sock *sk, const struct request_sock *req,
1173 const struct sk_buff *skb)
1174 {
1175 struct tcp_md5sig_pool *hp;
1176 struct hash_desc *desc;
1177 const struct tcphdr *th = tcp_hdr(skb);
1178 __be32 saddr, daddr;
1179
1180 if (sk) {
1181 saddr = inet_sk(sk)->inet_saddr;
1182 daddr = inet_sk(sk)->inet_daddr;
1183 } else if (req) {
1184 saddr = inet_rsk(req)->loc_addr;
1185 daddr = inet_rsk(req)->rmt_addr;
1186 } else {
1187 const struct iphdr *iph = ip_hdr(skb);
1188 saddr = iph->saddr;
1189 daddr = iph->daddr;
1190 }
1191
1192 hp = tcp_get_md5sig_pool();
1193 if (!hp)
1194 goto clear_hash_noput;
1195 desc = &hp->md5_desc;
1196
1197 if (crypto_hash_init(desc))
1198 goto clear_hash;
1199
1200 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1201 goto clear_hash;
1202 if (tcp_md5_hash_header(hp, th))
1203 goto clear_hash;
1204 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1205 goto clear_hash;
1206 if (tcp_md5_hash_key(hp, key))
1207 goto clear_hash;
1208 if (crypto_hash_final(desc, md5_hash))
1209 goto clear_hash;
1210
1211 tcp_put_md5sig_pool();
1212 return 0;
1213
1214 clear_hash:
1215 tcp_put_md5sig_pool();
1216 clear_hash_noput:
1217 memset(md5_hash, 0, 16);
1218 return 1;
1219 }
1220 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1221
1222 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1223 {
1224 /*
1225 * This gets called for each TCP segment that arrives
1226 * so we want to be efficient.
1227 * We have 3 drop cases:
1228 * o No MD5 hash and one expected.
1229 * o MD5 hash and we're not expecting one.
1230 * o MD5 hash and it's wrong.
1231 */
1232 const __u8 *hash_location = NULL;
1233 struct tcp_md5sig_key *hash_expected;
1234 const struct iphdr *iph = ip_hdr(skb);
1235 const struct tcphdr *th = tcp_hdr(skb);
1236 int genhash;
1237 unsigned char newhash[16];
1238
1239 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1240 AF_INET);
1241 hash_location = tcp_parse_md5sig_option(th);
1242
1243 /* We've parsed the options - do we have a hash? */
1244 if (!hash_expected && !hash_location)
1245 return false;
1246
1247 if (hash_expected && !hash_location) {
1248 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1249 return true;
1250 }
1251
1252 if (!hash_expected && hash_location) {
1253 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1254 return true;
1255 }
1256
1257 /* Okay, so this is hash_expected and hash_location -
1258 * so we need to calculate the checksum.
1259 */
1260 genhash = tcp_v4_md5_hash_skb(newhash,
1261 hash_expected,
1262 NULL, NULL, skb);
1263
1264 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1265 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1266 &iph->saddr, ntohs(th->source),
1267 &iph->daddr, ntohs(th->dest),
1268 genhash ? " tcp_v4_calc_md5_hash failed"
1269 : "");
1270 return true;
1271 }
1272 return false;
1273 }
1274
1275 #endif
1276
1277 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1278 .family = PF_INET,
1279 .obj_size = sizeof(struct tcp_request_sock),
1280 .rtx_syn_ack = tcp_v4_rtx_synack,
1281 .send_ack = tcp_v4_reqsk_send_ack,
1282 .destructor = tcp_v4_reqsk_destructor,
1283 .send_reset = tcp_v4_send_reset,
1284 .syn_ack_timeout = tcp_syn_ack_timeout,
1285 };
1286
1287 #ifdef CONFIG_TCP_MD5SIG
1288 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1289 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1290 .calc_md5_hash = tcp_v4_md5_hash_skb,
1291 };
1292 #endif
1293
1294 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1295 struct request_sock *req,
1296 struct tcp_fastopen_cookie *foc,
1297 struct tcp_fastopen_cookie *valid_foc)
1298 {
1299 bool skip_cookie = false;
1300 struct fastopen_queue *fastopenq;
1301
1302 if (likely(!fastopen_cookie_present(foc))) {
1303 /* See include/net/tcp.h for the meaning of these knobs */
1304 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1305 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1306 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1307 skip_cookie = true; /* no cookie to validate */
1308 else
1309 return false;
1310 }
1311 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1312 /* A FO option is present; bump the counter. */
1313 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1314
1315 /* Make sure the listener has enabled fastopen, and we don't
1316 * exceed the max # of pending TFO requests allowed before trying
1317 * to validate the cookie, in order to avoid burning CPU cycles
1318 * unnecessarily.
1319 *
1320 * XXX (TFO) - The implication of checking the max_qlen before
1321 * processing a cookie request is that clients can't differentiate
1322 * between qlen overflow causing Fast Open to be disabled
1323 * temporarily vs a server not supporting Fast Open at all.
1324 */
1325 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1326 fastopenq == NULL || fastopenq->max_qlen == 0)
1327 return false;
1328
1329 if (fastopenq->qlen >= fastopenq->max_qlen) {
1330 struct request_sock *req1;
1331 spin_lock(&fastopenq->lock);
1332 req1 = fastopenq->rskq_rst_head;
1333 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1334 spin_unlock(&fastopenq->lock);
1335 NET_INC_STATS_BH(sock_net(sk),
1336 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1337 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1338 foc->len = -1;
1339 return false;
1340 }
1341 fastopenq->rskq_rst_head = req1->dl_next;
1342 fastopenq->qlen--;
1343 spin_unlock(&fastopenq->lock);
1344 reqsk_free(req1);
1345 }
1346 if (skip_cookie) {
1347 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1348 return true;
1349 }
1350 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1351 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1352 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1353 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1354 memcmp(&foc->val[0], &valid_foc->val[0],
1355 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1356 return false;
1357 valid_foc->len = -1;
1358 }
1359 /* Acknowledge the data received from the peer. */
1360 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1361 return true;
1362 } else if (foc->len == 0) { /* Client requesting a cookie */
1363 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1364 NET_INC_STATS_BH(sock_net(sk),
1365 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1366 } else {
1367 /* Client sent a cookie with the wrong size. Treat it
1368 * the same as invalid and return a valid one.
1369 */
1370 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1371 }
1372 return false;
1373 }
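
/*
 * The cookie checks above implement the server side of TCP Fast Open. A
 * rough sketch of how the feature is driven from userspace (illustrative
 * only; lfd, cfd, qlen, backlog, buf, len and daddr are placeholders):
 *
 *	server, before listen(): allow up to qlen pending TFO requests
 *	setsockopt(lfd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(lfd, backlog);
 *
 *	client: carry data in the SYN itself
 *	sendto(cfd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * Whether cookies are required, and whether the server path is enabled at
 * all, is governed by the sysctl_tcp_fastopen bits tested above
 * (TFO_SERVER_ENABLE, TFO_SERVER_ALWAYS, TFO_SERVER_COOKIE_NOT_REQD,
 * TFO_SERVER_COOKIE_NOT_CHKED).
 */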
1374
1375 static int tcp_v4_conn_req_fastopen(struct sock *sk,
1376 struct sk_buff *skb,
1377 struct sk_buff *skb_synack,
1378 struct request_sock *req)
1379 {
1380 struct tcp_sock *tp = tcp_sk(sk);
1381 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1382 const struct inet_request_sock *ireq = inet_rsk(req);
1383 struct sock *child;
1384 int err;
1385
1386 req->num_retrans = 0;
1387 req->num_timeout = 0;
1388 req->sk = NULL;
1389
1390 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1391 if (child == NULL) {
1392 NET_INC_STATS_BH(sock_net(sk),
1393 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1394 kfree_skb(skb_synack);
1395 return -1;
1396 }
1397 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1398 ireq->rmt_addr, ireq->opt);
1399 err = net_xmit_eval(err);
1400 if (!err)
1401 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1402 /* XXX (TFO) - is it ok to ignore error and continue? */
1403
1404 spin_lock(&queue->fastopenq->lock);
1405 queue->fastopenq->qlen++;
1406 spin_unlock(&queue->fastopenq->lock);
1407
1408 /* Initialize the child socket. Have to fix some values to take
1409 * into account that the child is a Fast Open socket and is created
1410 * only out of the bits carried in the SYN packet.
1411 */
1412 tp = tcp_sk(child);
1413
1414 tp->fastopen_rsk = req;
1415 /* Take a hold on the listener sk so that if the listener is being
1416 * closed, the child that has been accepted can live on and still
1417 * access listen_lock.
1418 */
1419 sock_hold(sk);
1420 tcp_rsk(req)->listener = sk;
1421
1422 /* RFC1323: The window in SYN & SYN/ACK segments is never
1423 * scaled. So correct it appropriately.
1424 */
1425 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1426 tp->max_window = tp->snd_wnd;
1427
1428 /* Activate the retrans timer so that SYNACK can be retransmitted.
1429 * The request socket is not added to the SYN table of the parent
1430 * because it's been added to the accept queue directly.
1431 */
1432 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1433 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1434
1435 /* Add the child socket directly into the accept queue */
1436 inet_csk_reqsk_queue_add(sk, req, child);
1437
1438 /* Now finish processing the fastopen child socket. */
1439 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1440 tcp_init_congestion_control(child);
1441 tcp_mtup_init(child);
1442 tcp_init_buffer_space(child);
1443 tcp_init_metrics(child);
1444
1445 /* Queue the data carried in the SYN packet. We need to first
1446 * bump skb's refcnt because the caller will attempt to free it.
1447 *
1448 * XXX (TFO) - we honor a zero-payload TFO request for now.
1449 * (Any reason not to?)
1450 */
1451 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1452 /* Don't queue the skb if there is no payload in SYN.
1453 * XXX (TFO) - How about SYN+FIN?
1454 */
1455 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1456 } else {
1457 skb = skb_get(skb);
1458 skb_dst_drop(skb);
1459 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1460 skb_set_owner_r(skb, child);
1461 __skb_queue_tail(&child->sk_receive_queue, skb);
1462 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1463 tp->syn_data_acked = 1;
1464 }
1465 sk->sk_data_ready(sk, 0);
1466 bh_unlock_sock(child);
1467 sock_put(child);
1468 WARN_ON(req->sk == NULL);
1469 return 0;
1470 }
1471
1472 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1473 {
1474 struct tcp_options_received tmp_opt;
1475 struct request_sock *req;
1476 struct inet_request_sock *ireq;
1477 struct tcp_sock *tp = tcp_sk(sk);
1478 struct dst_entry *dst = NULL;
1479 __be32 saddr = ip_hdr(skb)->saddr;
1480 __be32 daddr = ip_hdr(skb)->daddr;
1481 __u32 isn = TCP_SKB_CB(skb)->when;
1482 bool want_cookie = false;
1483 struct flowi4 fl4;
1484 struct tcp_fastopen_cookie foc = { .len = -1 };
1485 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1486 struct sk_buff *skb_synack;
1487 int do_fastopen;
1488
1489 /* Never answer SYNs sent to broadcast or multicast */
1490 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1491 goto drop;
1492
1493 /* TW buckets are converted to open requests without
1494 * limitation; they conserve resources and the peer is
1495 * evidently a real one.
1496 */
1497 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1498 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1499 if (!want_cookie)
1500 goto drop;
1501 }
1502
1503 /* Accept backlog is full. If we have already queued enough
1504 * warm entries in the syn queue, drop the request. It is better than
1505 * clogging the syn queue with openreqs with exponentially increasing
1506 * timeout.
1507 */
1508 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1509 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1510 goto drop;
1511 }
1512
1513 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1514 if (!req)
1515 goto drop;
1516
1517 #ifdef CONFIG_TCP_MD5SIG
1518 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1519 #endif
1520
1521 tcp_clear_options(&tmp_opt);
1522 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1523 tmp_opt.user_mss = tp->rx_opt.user_mss;
1524 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1525
1526 if (want_cookie && !tmp_opt.saw_tstamp)
1527 tcp_clear_options(&tmp_opt);
1528
1529 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1530 tcp_openreq_init(req, &tmp_opt, skb);
1531
1532 ireq = inet_rsk(req);
1533 ireq->loc_addr = daddr;
1534 ireq->rmt_addr = saddr;
1535 ireq->no_srccheck = inet_sk(sk)->transparent;
1536 ireq->opt = tcp_v4_save_options(skb);
1537
1538 if (security_inet_conn_request(sk, skb, req))
1539 goto drop_and_free;
1540
1541 if (!want_cookie || tmp_opt.tstamp_ok)
1542 TCP_ECN_create_request(req, skb, sock_net(sk));
1543
1544 if (want_cookie) {
1545 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1546 req->cookie_ts = tmp_opt.tstamp_ok;
1547 } else if (!isn) {
1548 /* VJ's idea. We save the last timestamp seen
1549 * from the destination in the peer table when entering
1550 * TIME-WAIT state, and check against it before
1551 * accepting a new connection request.
1552 *
1553 * If "isn" is not zero, this request hit a live
1554 * timewait bucket, so all the necessary checks
1555 * are made in the function processing the timewait state.
1556 */
1557 if (tmp_opt.saw_tstamp &&
1558 tcp_death_row.sysctl_tw_recycle &&
1559 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1560 fl4.daddr == saddr) {
1561 if (!tcp_peer_is_proven(req, dst, true)) {
1562 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1563 goto drop_and_release;
1564 }
1565 }
1566 /* Kill the following clause, if you dislike this way. */
1567 else if (!sysctl_tcp_syncookies &&
1568 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1569 (sysctl_max_syn_backlog >> 2)) &&
1570 !tcp_peer_is_proven(req, dst, false)) {
1571 /* Without syncookies, the last quarter of the
1572 * backlog is filled only with destinations
1573 * proven to be alive.
1574 * It means that we continue to communicate
1575 * with destinations already remembered
1576 * at the moment of the synflood.
1577 */
1578 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1579 &saddr, ntohs(tcp_hdr(skb)->source));
1580 goto drop_and_release;
1581 }
1582
1583 isn = tcp_v4_init_sequence(skb);
1584 }
1585 tcp_rsk(req)->snt_isn = isn;
1586
1587 if (dst == NULL) {
1588 dst = inet_csk_route_req(sk, &fl4, req);
1589 if (dst == NULL)
1590 goto drop_and_free;
1591 }
1592 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1593
1594 /* We don't call tcp_v4_send_synack() directly because we need
1595 * to make sure a child socket can be created successfully before
1596 * sending back synack!
1597 *
1598 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1599 * (or better yet, call tcp_send_synack() in the child context
1600 * directly, but we will have to fix a bunch of other code first)
1601 * after syn_recv_sock() except one will need to first fix the
1602 * latter to remove its dependency on the current implementation
1603 * of tcp_v4_send_synack()->tcp_select_initial_window().
1604 */
1605 skb_synack = tcp_make_synack(sk, dst, req,
1606 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1607
1608 if (skb_synack) {
1609 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1610 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1611 } else
1612 goto drop_and_free;
1613
1614 if (likely(!do_fastopen)) {
1615 int err;
1616 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1617 ireq->rmt_addr, ireq->opt);
1618 err = net_xmit_eval(err);
1619 if (err || want_cookie)
1620 goto drop_and_free;
1621
1622 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1623 tcp_rsk(req)->listener = NULL;
1624 /* Add the request_sock to the SYN table */
1625 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1626 if (fastopen_cookie_present(&foc) && foc.len != 0)
1627 NET_INC_STATS_BH(sock_net(sk),
1628 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1629 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1630 goto drop_and_free;
1631
1632 return 0;
1633
1634 drop_and_release:
1635 dst_release(dst);
1636 drop_and_free:
1637 reqsk_free(req);
1638 drop:
1639 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1640 return 0;
1641 }
1642 EXPORT_SYMBOL(tcp_v4_conn_request);
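
/*
 * To summarize the SYN handling above: broadcast/multicast SYNs are
 * dropped outright; a full request queue either triggers syncookies or
 * drops the SYN; otherwise a request_sock is allocated, options are
 * parsed, and the initial sequence number comes from the cookie generator,
 * the timewait recycling path, or tcp_v4_init_sequence(). The SYN-ACK is
 * only sent once tcp_make_synack() has succeeded, and in the Fast Open
 * case the child socket is created before the SYN-ACK goes out and is
 * placed directly on the accept queue.
 */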
1643
1644
1645 /*
1646 * The three way handshake has completed - we got a valid synack -
1647 * now create the new socket.
1648 */
1649 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1650 struct request_sock *req,
1651 struct dst_entry *dst)
1652 {
1653 struct inet_request_sock *ireq;
1654 struct inet_sock *newinet;
1655 struct tcp_sock *newtp;
1656 struct sock *newsk;
1657 #ifdef CONFIG_TCP_MD5SIG
1658 struct tcp_md5sig_key *key;
1659 #endif
1660 struct ip_options_rcu *inet_opt;
1661
1662 if (sk_acceptq_is_full(sk))
1663 goto exit_overflow;
1664
1665 newsk = tcp_create_openreq_child(sk, req, skb);
1666 if (!newsk)
1667 goto exit_nonewsk;
1668
1669 newsk->sk_gso_type = SKB_GSO_TCPV4;
1670 inet_sk_rx_dst_set(newsk, skb);
1671
1672 newtp = tcp_sk(newsk);
1673 newinet = inet_sk(newsk);
1674 ireq = inet_rsk(req);
1675 newinet->inet_daddr = ireq->rmt_addr;
1676 newinet->inet_rcv_saddr = ireq->loc_addr;
1677 newinet->inet_saddr = ireq->loc_addr;
1678 inet_opt = ireq->opt;
1679 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1680 ireq->opt = NULL;
1681 newinet->mc_index = inet_iif(skb);
1682 newinet->mc_ttl = ip_hdr(skb)->ttl;
1683 newinet->rcv_tos = ip_hdr(skb)->tos;
1684 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1685 if (inet_opt)
1686 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1687 newinet->inet_id = newtp->write_seq ^ jiffies;
1688
1689 if (!dst) {
1690 dst = inet_csk_route_child_sock(sk, newsk, req);
1691 if (!dst)
1692 goto put_and_exit;
1693 } else {
1694 /* syncookie case : see end of cookie_v4_check() */
1695 }
1696 sk_setup_caps(newsk, dst);
1697
1698 tcp_mtup_init(newsk);
1699 tcp_sync_mss(newsk, dst_mtu(dst));
1700 newtp->advmss = dst_metric_advmss(dst);
1701 if (tcp_sk(sk)->rx_opt.user_mss &&
1702 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1703 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1704
1705 tcp_initialize_rcv_mss(newsk);
1706 tcp_synack_rtt_meas(newsk, req);
1707 newtp->total_retrans = req->num_retrans;
1708
1709 #ifdef CONFIG_TCP_MD5SIG
1710 /* Copy over the MD5 key from the original socket */
1711 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1712 AF_INET);
1713 if (key != NULL) {
1714 /*
1715 * We're using one, so create a matching key
1716 * on the newsk structure. If we fail to get
1717 * memory, then we end up not copying the key
1718 * across. Shucks.
1719 */
1720 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1721 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1722 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1723 }
1724 #endif
1725
1726 if (__inet_inherit_port(sk, newsk) < 0)
1727 goto put_and_exit;
1728 __inet_hash_nolisten(newsk, NULL);
1729
1730 return newsk;
1731
1732 exit_overflow:
1733 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1734 exit_nonewsk:
1735 dst_release(dst);
1736 exit:
1737 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1738 return NULL;
1739 put_and_exit:
1740 inet_csk_prepare_forced_close(newsk);
1741 tcp_done(newsk);
1742 goto exit;
1743 }
1744 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1745
1746 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1747 {
1748 struct tcphdr *th = tcp_hdr(skb);
1749 const struct iphdr *iph = ip_hdr(skb);
1750 struct sock *nsk;
1751 struct request_sock **prev;
1752 /* Find possible connection requests. */
1753 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1754 iph->saddr, iph->daddr);
1755 if (req)
1756 return tcp_check_req(sk, skb, req, prev, false);
1757
1758 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1759 th->source, iph->daddr, th->dest, inet_iif(skb));
1760
1761 if (nsk) {
1762 if (nsk->sk_state != TCP_TIME_WAIT) {
1763 bh_lock_sock(nsk);
1764 return nsk;
1765 }
1766 inet_twsk_put(inet_twsk(nsk));
1767 return NULL;
1768 }
1769
1770 #ifdef CONFIG_SYN_COOKIES
1771 if (!th->syn)
1772 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1773 #endif
1774 return sk;
1775 }
1776
1777 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1778 {
1779 const struct iphdr *iph = ip_hdr(skb);
1780
1781 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1782 if (!tcp_v4_check(skb->len, iph->saddr,
1783 iph->daddr, skb->csum)) {
1784 skb->ip_summed = CHECKSUM_UNNECESSARY;
1785 return 0;
1786 }
1787 }
1788
1789 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1790 skb->len, IPPROTO_TCP, 0);
1791
1792 if (skb->len <= 76) {
1793 return __skb_checksum_complete(skb);
1794 }
1795 return 0;
1796 }
1797
1798
1799 /* The socket must have its spinlock held when we get
1800 * here.
1801 *
1802 * We have a potential double-lock case here, so even when
1803 * doing backlog processing we use the BH locking scheme.
1804 * This is because we cannot sleep with the original spinlock
1805 * held.
1806 */
1807 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1808 {
1809 struct sock *rsk;
1810 #ifdef CONFIG_TCP_MD5SIG
1811 /*
1812 * We really want to reject the packet as early as possible
1813 * if:
1814 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1815 * o There is an MD5 option and we're not expecting one
1816 */
1817 if (tcp_v4_inbound_md5_hash(sk, skb))
1818 goto discard;
1819 #endif
1820
1821 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1822 struct dst_entry *dst = sk->sk_rx_dst;
1823
1824 sock_rps_save_rxhash(sk, skb);
1825 if (dst) {
1826 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1827 dst->ops->check(dst, 0) == NULL) {
1828 dst_release(dst);
1829 sk->sk_rx_dst = NULL;
1830 }
1831 }
1832 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1833 rsk = sk;
1834 goto reset;
1835 }
1836 return 0;
1837 }
1838
1839 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1840 goto csum_err;
1841
1842 if (sk->sk_state == TCP_LISTEN) {
1843 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1844 if (!nsk)
1845 goto discard;
1846
1847 if (nsk != sk) {
1848 sock_rps_save_rxhash(nsk, skb);
1849 if (tcp_child_process(sk, nsk, skb)) {
1850 rsk = nsk;
1851 goto reset;
1852 }
1853 return 0;
1854 }
1855 } else
1856 sock_rps_save_rxhash(sk, skb);
1857
1858 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1859 rsk = sk;
1860 goto reset;
1861 }
1862 return 0;
1863
1864 reset:
1865 tcp_v4_send_reset(rsk, skb);
1866 discard:
1867 kfree_skb(skb);
1868 /* Be careful here. If this function gets more complicated and
1869 * gcc suffers from register pressure on the x86, sk (in %ebx)
1870 * might be destroyed here. This current version compiles correctly,
1871 * but you have been warned.
1872 */
1873 return 0;
1874
1875 csum_err:
1876 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1877 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1878 goto discard;
1879 }
1880 EXPORT_SYMBOL(tcp_v4_do_rcv);
1881
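/* Early demux: before the routing decision, try to find an already
 * established socket for this packet so that the dst cached on it by
 * inet_sk_rx_dst_set() can be reused and a full route lookup avoided.
 */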
1882 void tcp_v4_early_demux(struct sk_buff *skb)
1883 {
1884 const struct iphdr *iph;
1885 const struct tcphdr *th;
1886 struct sock *sk;
1887
1888 if (skb->pkt_type != PACKET_HOST)
1889 return;
1890
1891 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1892 return;
1893
1894 iph = ip_hdr(skb);
1895 th = tcp_hdr(skb);
1896
1897 if (th->doff < sizeof(struct tcphdr) / 4)
1898 return;
1899
1900 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1901 iph->saddr, th->source,
1902 iph->daddr, ntohs(th->dest),
1903 skb->skb_iif);
1904 if (sk) {
1905 skb->sk = sk;
1906 skb->destructor = sock_edemux;
1907 if (sk->sk_state != TCP_TIME_WAIT) {
1908 struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
1909
1910 if (dst)
1911 dst = dst_check(dst, 0);
1912 if (dst &&
1913 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1914 skb_dst_set_noref(skb, dst);
1915 }
1916 }
1917 }
1918
1919 /* Packet is added to VJ-style prequeue for processing in process
1920 * context, if a reader task is waiting. Apparently, this exciting
1921 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1922 * failed somewhere. Latency? Burstiness? Well, at least now we will
1923 * see why it failed. 8)8) --ANK
1924 *
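 * Prequeueing is skipped entirely when net.ipv4.tcp_low_latency is
 * set or no reader is blocked in tcp_recvmsg(); if the accumulated
 * prequeue memory exceeds sk_rcvbuf, the queued segments are
 * processed immediately via sk_backlog_rcv() instead.
 *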
1925 */
1926 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1927 {
1928 struct tcp_sock *tp = tcp_sk(sk);
1929
1930 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1931 return false;
1932
1933 if (skb->len <= tcp_hdrlen(skb) &&
1934 skb_queue_len(&tp->ucopy.prequeue) == 0)
1935 return false;
1936
1937 skb_dst_force(skb);
1938 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1939 tp->ucopy.memory += skb->truesize;
1940 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1941 struct sk_buff *skb1;
1942
1943 BUG_ON(sock_owned_by_user(sk));
1944
1945 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1946 sk_backlog_rcv(sk, skb1);
1947 NET_INC_STATS_BH(sock_net(sk),
1948 LINUX_MIB_TCPPREQUEUEDROPPED);
1949 }
1950
1951 tp->ucopy.memory = 0;
1952 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1953 wake_up_interruptible_sync_poll(sk_sleep(sk),
1954 POLLIN | POLLRDNORM | POLLRDBAND);
1955 if (!inet_csk_ack_scheduled(sk))
1956 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1957 (3 * tcp_rto_min(sk)) / 4,
1958 TCP_RTO_MAX);
1959 }
1960 return true;
1961 }
1962 EXPORT_SYMBOL(tcp_prequeue);
1963
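/* Run the socket filter (BPF) on an incoming segment. The filter may
 * trim the skb but never below the TCP header (th->doff * 4 bytes);
 * end_seq is adjusted to account for any payload that was trimmed.
 */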
1964 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1965 {
1966 struct tcphdr *th = (struct tcphdr *)skb->data;
1967 unsigned int eaten = skb->len;
1968 int err;
1969
1970 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1971 if (!err) {
1972 eaten -= skb->len;
1973 TCP_SKB_CB(skb)->end_seq -= eaten;
1974 }
1975 return err;
1976 }
1977 EXPORT_SYMBOL(tcp_filter);
1978
1979 /*
1980 * From tcp_input.c
1981 */
1982
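/* Main IPv4 receive path: validate the header and checksum, look up
 * the owning socket and then either process the segment immediately,
 * place it on the prequeue for a waiting reader, or append it to the
 * backlog if the socket is currently owned by user context.
 * TIME_WAIT sockets are handled via tcp_timewait_state_process().
 */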
1983 int tcp_v4_rcv(struct sk_buff *skb)
1984 {
1985 const struct iphdr *iph;
1986 const struct tcphdr *th;
1987 struct sock *sk;
1988 int ret;
1989 struct net *net = dev_net(skb->dev);
1990
1991 if (skb->pkt_type != PACKET_HOST)
1992 goto discard_it;
1993
1994 /* Count it even if it's bad */
1995 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1996
1997 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1998 goto discard_it;
1999
2000 th = tcp_hdr(skb);
2001
2002 if (th->doff < sizeof(struct tcphdr) / 4)
2003 goto bad_packet;
2004 if (!pskb_may_pull(skb, th->doff * 4))
2005 goto discard_it;
2006
2007 /* An explanation is required here, I think:
2008 * packet length and doff are validated by header prediction,
2009 * provided the th->doff==0 case is eliminated,
2010 * so we defer the checks. */
2011 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
2012 goto csum_error;
2013
2014 th = tcp_hdr(skb);
2015 iph = ip_hdr(skb);
2016 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
2017 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
2018 skb->len - th->doff * 4);
2019 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
2020 TCP_SKB_CB(skb)->when = 0;
2021 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2022 TCP_SKB_CB(skb)->sacked = 0;
2023
2024 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
2025 if (!sk)
2026 goto no_tcp_socket;
2027
2028 process:
2029 if (sk->sk_state == TCP_TIME_WAIT)
2030 goto do_time_wait;
2031
2032 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2033 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
2034 goto discard_and_relse;
2035 }
2036
2037 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2038 goto discard_and_relse;
2039 nf_reset(skb);
2040
2041 if (tcp_filter(sk, skb))
2042 goto discard_and_relse;
2043 th = (const struct tcphdr *)skb->data;
2044 iph = ip_hdr(skb);
2045
2046 skb->dev = NULL;
2047
2048 bh_lock_sock_nested(sk);
2049 ret = 0;
2050 if (!sock_owned_by_user(sk)) {
2051 #ifdef CONFIG_NET_DMA
2052 struct tcp_sock *tp = tcp_sk(sk);
2053 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2054 tp->ucopy.dma_chan = net_dma_find_channel();
2055 if (tp->ucopy.dma_chan)
2056 ret = tcp_v4_do_rcv(sk, skb);
2057 else
2058 #endif
2059 {
2060 if (!tcp_prequeue(sk, skb))
2061 ret = tcp_v4_do_rcv(sk, skb);
2062 }
2063 } else if (unlikely(sk_add_backlog(sk, skb,
2064 sk->sk_rcvbuf + sk->sk_sndbuf))) {
2065 bh_unlock_sock(sk);
2066 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2067 goto discard_and_relse;
2068 }
2069 bh_unlock_sock(sk);
2070
2071 sock_put(sk);
2072
2073 return ret;
2074
2075 no_tcp_socket:
2076 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2077 goto discard_it;
2078
2079 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2080 csum_error:
2081 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
2082 bad_packet:
2083 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2084 } else {
2085 tcp_v4_send_reset(NULL, skb);
2086 }
2087
2088 discard_it:
2089 /* Discard frame. */
2090 kfree_skb(skb);
2091 return 0;
2092
2093 discard_and_relse:
2094 sock_put(sk);
2095 goto discard_it;
2096
2097 do_time_wait:
2098 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2099 inet_twsk_put(inet_twsk(sk));
2100 goto discard_it;
2101 }
2102
2103 if (skb->len < (th->doff << 2)) {
2104 inet_twsk_put(inet_twsk(sk));
2105 goto bad_packet;
2106 }
2107 if (tcp_checksum_complete(skb)) {
2108 inet_twsk_put(inet_twsk(sk));
2109 goto csum_error;
2110 }
2111 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2112 case TCP_TW_SYN: {
2113 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2114 &tcp_hashinfo,
2115 iph->saddr, th->source,
2116 iph->daddr, th->dest,
2117 inet_iif(skb));
2118 if (sk2) {
2119 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2120 inet_twsk_put(inet_twsk(sk));
2121 sk = sk2;
2122 goto process;
2123 }
2124 /* Fall through to ACK */
2125 }
2126 case TCP_TW_ACK:
2127 tcp_v4_timewait_ack(sk, skb);
2128 break;
2129 case TCP_TW_RST:
2130 goto no_tcp_socket;
2131 case TCP_TW_SUCCESS:;
2132 }
2133 goto discard_it;
2134 }
2135
2136 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2137 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2138 .twsk_unique = tcp_twsk_unique,
2139 .twsk_destructor = tcp_twsk_destructor,
2140 };
2141
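/* Cache the input route and incoming interface on the socket so that
 * tcp_v4_early_demux() and the established fast path can reuse them
 * instead of doing a route lookup for every packet.
 */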
2142 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2143 {
2144 struct dst_entry *dst = skb_dst(skb);
2145
2146 dst_hold(dst);
2147 sk->sk_rx_dst = dst;
2148 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2149 }
2150 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2151
2152 const struct inet_connection_sock_af_ops ipv4_specific = {
2153 .queue_xmit = ip_queue_xmit,
2154 .send_check = tcp_v4_send_check,
2155 .rebuild_header = inet_sk_rebuild_header,
2156 .sk_rx_dst_set = inet_sk_rx_dst_set,
2157 .conn_request = tcp_v4_conn_request,
2158 .syn_recv_sock = tcp_v4_syn_recv_sock,
2159 .net_header_len = sizeof(struct iphdr),
2160 .setsockopt = ip_setsockopt,
2161 .getsockopt = ip_getsockopt,
2162 .addr2sockaddr = inet_csk_addr2sockaddr,
2163 .sockaddr_len = sizeof(struct sockaddr_in),
2164 .bind_conflict = inet_csk_bind_conflict,
2165 #ifdef CONFIG_COMPAT
2166 .compat_setsockopt = compat_ip_setsockopt,
2167 .compat_getsockopt = compat_ip_getsockopt,
2168 #endif
2169 .mtu_reduced = tcp_v4_mtu_reduced,
2170 };
2171 EXPORT_SYMBOL(ipv4_specific);
2172
2173 #ifdef CONFIG_TCP_MD5SIG
2174 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2175 .md5_lookup = tcp_v4_md5_lookup,
2176 .calc_md5_hash = tcp_v4_md5_hash_skb,
2177 .md5_parse = tcp_v4_parse_md5_keys,
2178 };
2179 #endif
2180
2181 /* NOTE: A lot of things are set to zero explicitly by the call to
2182 * sk_alloc(), so they need not be done here.
2183 */
2184 static int tcp_v4_init_sock(struct sock *sk)
2185 {
2186 struct inet_connection_sock *icsk = inet_csk(sk);
2187
2188 tcp_init_sock(sk);
2189
2190 icsk->icsk_af_ops = &ipv4_specific;
2191
2192 #ifdef CONFIG_TCP_MD5SIG
2193 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2194 #endif
2195
2196 return 0;
2197 }
2198
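/* Final cleanup of an IPv4 TCP socket: stop timers, purge the write,
 * out-of-order and prequeue queues, drop any MD5 keys and release the
 * bound port.
 */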
2199 void tcp_v4_destroy_sock(struct sock *sk)
2200 {
2201 struct tcp_sock *tp = tcp_sk(sk);
2202
2203 tcp_clear_xmit_timers(sk);
2204
2205 tcp_cleanup_congestion_control(sk);
2206
2207 /* Clean up the write buffer. */
2208 tcp_write_queue_purge(sk);
2209
2210 /* Cleans up our, hopefully empty, out_of_order_queue. */
2211 __skb_queue_purge(&tp->out_of_order_queue);
2212
2213 #ifdef CONFIG_TCP_MD5SIG
2214 /* Clean up the MD5 key list, if any */
2215 if (tp->md5sig_info) {
2216 tcp_clear_md5_list(sk);
2217 kfree_rcu(tp->md5sig_info, rcu);
2218 tp->md5sig_info = NULL;
2219 }
2220 #endif
2221
2222 #ifdef CONFIG_NET_DMA
2223 /* Cleans up our sk_async_wait_queue */
2224 __skb_queue_purge(&sk->sk_async_wait_queue);
2225 #endif
2226
2227 /* Clean up the prequeue; it really should be empty. */
2228 __skb_queue_purge(&tp->ucopy.prequeue);
2229
2230 /* Clean up a referenced TCP bind bucket. */
2231 if (inet_csk(sk)->icsk_bind_hash)
2232 inet_put_port(sk);
2233
2234 BUG_ON(tp->fastopen_rsk != NULL);
2235
2236 /* If the socket was aborted during a connect operation, free any pending fast open request. */
2237 tcp_free_fastopen_req(tp);
2238
2239 sk_sockets_allocated_dec(sk);
2240 sock_release_memcg(sk);
2241 }
2242 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2243
2244 #ifdef CONFIG_PROC_FS
2245 /* Proc filesystem TCP sock list dumping. */
2246
2247 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2248 {
2249 return hlist_nulls_empty(head) ? NULL :
2250 list_entry(head->first, struct inet_timewait_sock, tw_node);
2251 }
2252
2253 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2254 {
2255 return !is_a_nulls(tw->tw_node.next) ?
2256 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2257 }
2258
2259 /*
2260 * Get the next listener socket following cur. If cur is NULL, get the first socket
2261 * starting from bucket given in st->bucket; when st->bucket is zero the
2262 * very first socket in the hash table is returned.
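 * The walk also descends into each listener's SYN queue, so pending
 * open requests (TCP_SEQ_STATE_OPENREQ) are reported as well.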
2263 */
2264 static void *listening_get_next(struct seq_file *seq, void *cur)
2265 {
2266 struct inet_connection_sock *icsk;
2267 struct hlist_nulls_node *node;
2268 struct sock *sk = cur;
2269 struct inet_listen_hashbucket *ilb;
2270 struct tcp_iter_state *st = seq->private;
2271 struct net *net = seq_file_net(seq);
2272
2273 if (!sk) {
2274 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2275 spin_lock_bh(&ilb->lock);
2276 sk = sk_nulls_head(&ilb->head);
2277 st->offset = 0;
2278 goto get_sk;
2279 }
2280 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2281 ++st->num;
2282 ++st->offset;
2283
2284 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2285 struct request_sock *req = cur;
2286
2287 icsk = inet_csk(st->syn_wait_sk);
2288 req = req->dl_next;
2289 while (1) {
2290 while (req) {
2291 if (req->rsk_ops->family == st->family) {
2292 cur = req;
2293 goto out;
2294 }
2295 req = req->dl_next;
2296 }
2297 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2298 break;
2299 get_req:
2300 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2301 }
2302 sk = sk_nulls_next(st->syn_wait_sk);
2303 st->state = TCP_SEQ_STATE_LISTENING;
2304 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2305 } else {
2306 icsk = inet_csk(sk);
2307 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2308 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2309 goto start_req;
2310 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2311 sk = sk_nulls_next(sk);
2312 }
2313 get_sk:
2314 sk_nulls_for_each_from(sk, node) {
2315 if (!net_eq(sock_net(sk), net))
2316 continue;
2317 if (sk->sk_family == st->family) {
2318 cur = sk;
2319 goto out;
2320 }
2321 icsk = inet_csk(sk);
2322 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2323 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2324 start_req:
2325 st->uid = sock_i_uid(sk);
2326 st->syn_wait_sk = sk;
2327 st->state = TCP_SEQ_STATE_OPENREQ;
2328 st->sbucket = 0;
2329 goto get_req;
2330 }
2331 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2332 }
2333 spin_unlock_bh(&ilb->lock);
2334 st->offset = 0;
2335 if (++st->bucket < INET_LHTABLE_SIZE) {
2336 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2337 spin_lock_bh(&ilb->lock);
2338 sk = sk_nulls_head(&ilb->head);
2339 goto get_sk;
2340 }
2341 cur = NULL;
2342 out:
2343 return cur;
2344 }
2345
2346 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2347 {
2348 struct tcp_iter_state *st = seq->private;
2349 void *rc;
2350
2351 st->bucket = 0;
2352 st->offset = 0;
2353 rc = listening_get_next(seq, NULL);
2354
2355 while (rc && *pos) {
2356 rc = listening_get_next(seq, rc);
2357 --*pos;
2358 }
2359 return rc;
2360 }
2361
2362 static inline bool empty_bucket(struct tcp_iter_state *st)
2363 {
2364 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2365 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2366 }
2367
2368 /*
2369 * Get first established socket starting from bucket given in st->bucket.
2370 * If st->bucket is zero, the very first socket in the hash is returned.
2371 */
2372 static void *established_get_first(struct seq_file *seq)
2373 {
2374 struct tcp_iter_state *st = seq->private;
2375 struct net *net = seq_file_net(seq);
2376 void *rc = NULL;
2377
2378 st->offset = 0;
2379 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2380 struct sock *sk;
2381 struct hlist_nulls_node *node;
2382 struct inet_timewait_sock *tw;
2383 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2384
2385 /* Lockless fast path for the common case of empty buckets */
2386 if (empty_bucket(st))
2387 continue;
2388
2389 spin_lock_bh(lock);
2390 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2391 if (sk->sk_family != st->family ||
2392 !net_eq(sock_net(sk), net)) {
2393 continue;
2394 }
2395 rc = sk;
2396 goto out;
2397 }
2398 st->state = TCP_SEQ_STATE_TIME_WAIT;
2399 inet_twsk_for_each(tw, node,
2400 &tcp_hashinfo.ehash[st->bucket].twchain) {
2401 if (tw->tw_family != st->family ||
2402 !net_eq(twsk_net(tw), net)) {
2403 continue;
2404 }
2405 rc = tw;
2406 goto out;
2407 }
2408 spin_unlock_bh(lock);
2409 st->state = TCP_SEQ_STATE_ESTABLISHED;
2410 }
2411 out:
2412 return rc;
2413 }
2414
2415 static void *established_get_next(struct seq_file *seq, void *cur)
2416 {
2417 struct sock *sk = cur;
2418 struct inet_timewait_sock *tw;
2419 struct hlist_nulls_node *node;
2420 struct tcp_iter_state *st = seq->private;
2421 struct net *net = seq_file_net(seq);
2422
2423 ++st->num;
2424 ++st->offset;
2425
2426 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2427 tw = cur;
2428 tw = tw_next(tw);
2429 get_tw:
2430 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2431 tw = tw_next(tw);
2432 }
2433 if (tw) {
2434 cur = tw;
2435 goto out;
2436 }
2437 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2438 st->state = TCP_SEQ_STATE_ESTABLISHED;
2439
2440 /* Look for the next non-empty bucket */
2441 st->offset = 0;
2442 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2443 empty_bucket(st))
2444 ;
2445 if (st->bucket > tcp_hashinfo.ehash_mask)
2446 return NULL;
2447
2448 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2449 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2450 } else
2451 sk = sk_nulls_next(sk);
2452
2453 sk_nulls_for_each_from(sk, node) {
2454 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2455 goto found;
2456 }
2457
2458 st->state = TCP_SEQ_STATE_TIME_WAIT;
2459 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2460 goto get_tw;
2461 found:
2462 cur = sk;
2463 out:
2464 return cur;
2465 }
2466
2467 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2468 {
2469 struct tcp_iter_state *st = seq->private;
2470 void *rc;
2471
2472 st->bucket = 0;
2473 rc = established_get_first(seq);
2474
2475 while (rc && pos) {
2476 rc = established_get_next(seq, rc);
2477 --pos;
2478 }
2479 return rc;
2480 }
2481
2482 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2483 {
2484 void *rc;
2485 struct tcp_iter_state *st = seq->private;
2486
2487 st->state = TCP_SEQ_STATE_LISTENING;
2488 rc = listening_get_idx(seq, &pos);
2489
2490 if (!rc) {
2491 st->state = TCP_SEQ_STATE_ESTABLISHED;
2492 rc = established_get_idx(seq, pos);
2493 }
2494
2495 return rc;
2496 }
2497
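/* Try to resume the seq_file walk at the bucket/offset saved in the
 * iterator state; returns NULL when that position can no longer be
 * reached, in which case the caller restarts from the beginning.
 */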
2498 static void *tcp_seek_last_pos(struct seq_file *seq)
2499 {
2500 struct tcp_iter_state *st = seq->private;
2501 int offset = st->offset;
2502 int orig_num = st->num;
2503 void *rc = NULL;
2504
2505 switch (st->state) {
2506 case TCP_SEQ_STATE_OPENREQ:
2507 case TCP_SEQ_STATE_LISTENING:
2508 if (st->bucket >= INET_LHTABLE_SIZE)
2509 break;
2510 st->state = TCP_SEQ_STATE_LISTENING;
2511 rc = listening_get_next(seq, NULL);
2512 while (offset-- && rc)
2513 rc = listening_get_next(seq, rc);
2514 if (rc)
2515 break;
2516 st->bucket = 0;
2517 /* Fallthrough */
2518 case TCP_SEQ_STATE_ESTABLISHED:
2519 case TCP_SEQ_STATE_TIME_WAIT:
2520 st->state = TCP_SEQ_STATE_ESTABLISHED;
2521 if (st->bucket > tcp_hashinfo.ehash_mask)
2522 break;
2523 rc = established_get_first(seq);
2524 while (offset-- && rc)
2525 rc = established_get_next(seq, rc);
2526 }
2527
2528 st->num = orig_num;
2529
2530 return rc;
2531 }
2532
2533 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2534 {
2535 struct tcp_iter_state *st = seq->private;
2536 void *rc;
2537
2538 if (*pos && *pos == st->last_pos) {
2539 rc = tcp_seek_last_pos(seq);
2540 if (rc)
2541 goto out;
2542 }
2543
2544 st->state = TCP_SEQ_STATE_LISTENING;
2545 st->num = 0;
2546 st->bucket = 0;
2547 st->offset = 0;
2548 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2549
2550 out:
2551 st->last_pos = *pos;
2552 return rc;
2553 }
2554
2555 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2556 {
2557 struct tcp_iter_state *st = seq->private;
2558 void *rc = NULL;
2559
2560 if (v == SEQ_START_TOKEN) {
2561 rc = tcp_get_idx(seq, 0);
2562 goto out;
2563 }
2564
2565 switch (st->state) {
2566 case TCP_SEQ_STATE_OPENREQ:
2567 case TCP_SEQ_STATE_LISTENING:
2568 rc = listening_get_next(seq, v);
2569 if (!rc) {
2570 st->state = TCP_SEQ_STATE_ESTABLISHED;
2571 st->bucket = 0;
2572 st->offset = 0;
2573 rc = established_get_first(seq);
2574 }
2575 break;
2576 case TCP_SEQ_STATE_ESTABLISHED:
2577 case TCP_SEQ_STATE_TIME_WAIT:
2578 rc = established_get_next(seq, v);
2579 break;
2580 }
2581 out:
2582 ++*pos;
2583 st->last_pos = *pos;
2584 return rc;
2585 }
2586
2587 static void tcp_seq_stop(struct seq_file *seq, void *v)
2588 {
2589 struct tcp_iter_state *st = seq->private;
2590
2591 switch (st->state) {
2592 case TCP_SEQ_STATE_OPENREQ:
2593 if (v) {
2594 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2595 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2596 }
2597 case TCP_SEQ_STATE_LISTENING:
2598 if (v != SEQ_START_TOKEN)
2599 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2600 break;
2601 case TCP_SEQ_STATE_TIME_WAIT:
2602 case TCP_SEQ_STATE_ESTABLISHED:
2603 if (v)
2604 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2605 break;
2606 }
2607 }
2608
2609 int tcp_seq_open(struct inode *inode, struct file *file)
2610 {
2611 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2612 struct tcp_iter_state *s;
2613 int err;
2614
2615 err = seq_open_net(inode, file, &afinfo->seq_ops,
2616 sizeof(struct tcp_iter_state));
2617 if (err < 0)
2618 return err;
2619
2620 s = ((struct seq_file *)file->private_data)->private;
2621 s->family = afinfo->family;
2622 s->last_pos = 0;
2623 return 0;
2624 }
2625 EXPORT_SYMBOL(tcp_seq_open);
2626
2627 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2628 {
2629 int rc = 0;
2630 struct proc_dir_entry *p;
2631
2632 afinfo->seq_ops.start = tcp_seq_start;
2633 afinfo->seq_ops.next = tcp_seq_next;
2634 afinfo->seq_ops.stop = tcp_seq_stop;
2635
2636 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2637 afinfo->seq_fops, afinfo);
2638 if (!p)
2639 rc = -ENOMEM;
2640 return rc;
2641 }
2642 EXPORT_SYMBOL(tcp_proc_register);
2643
2644 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2645 {
2646 remove_proc_entry(afinfo->name, net->proc_net);
2647 }
2648 EXPORT_SYMBOL(tcp_proc_unregister);
2649
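/* Format one /proc/net/tcp line for a pending connection request
 * (SYN_RECV) hanging off a listening socket.
 */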
2650 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2651 struct seq_file *f, int i, kuid_t uid, int *len)
2652 {
2653 const struct inet_request_sock *ireq = inet_rsk(req);
2654 long delta = req->expires - jiffies;
2655
2656 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2657 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2658 i,
2659 ireq->loc_addr,
2660 ntohs(inet_sk(sk)->inet_sport),
2661 ireq->rmt_addr,
2662 ntohs(ireq->rmt_port),
2663 TCP_SYN_RECV,
2664 0, 0, /* could print option size, but that is af dependent. */
2665 1, /* timers active (only the expire timer) */
2666 jiffies_delta_to_clock_t(delta),
2667 req->num_timeout,
2668 from_kuid_munged(seq_user_ns(f), uid),
2669 0, /* non standard timer */
2670 0, /* open_requests have no inode */
2671 atomic_read(&sk->sk_refcnt),
2672 req,
2673 len);
2674 }
2675
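/* Format one /proc/net/tcp line for a listening or established socket. */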
2676 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2677 {
2678 int timer_active;
2679 unsigned long timer_expires;
2680 const struct tcp_sock *tp = tcp_sk(sk);
2681 const struct inet_connection_sock *icsk = inet_csk(sk);
2682 const struct inet_sock *inet = inet_sk(sk);
2683 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2684 __be32 dest = inet->inet_daddr;
2685 __be32 src = inet->inet_rcv_saddr;
2686 __u16 destp = ntohs(inet->inet_dport);
2687 __u16 srcp = ntohs(inet->inet_sport);
2688 int rx_queue;
2689
2690 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2691 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2692 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2693 timer_active = 1;
2694 timer_expires = icsk->icsk_timeout;
2695 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2696 timer_active = 4;
2697 timer_expires = icsk->icsk_timeout;
2698 } else if (timer_pending(&sk->sk_timer)) {
2699 timer_active = 2;
2700 timer_expires = sk->sk_timer.expires;
2701 } else {
2702 timer_active = 0;
2703 timer_expires = jiffies;
2704 }
2705
2706 if (sk->sk_state == TCP_LISTEN)
2707 rx_queue = sk->sk_ack_backlog;
2708 else
2709 /*
2710 * Because we don't lock the socket, we might find a transient negative value.
2711 */
2712 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2713
2714 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2715 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2716 i, src, srcp, dest, destp, sk->sk_state,
2717 tp->write_seq - tp->snd_una,
2718 rx_queue,
2719 timer_active,
2720 jiffies_delta_to_clock_t(timer_expires - jiffies),
2721 icsk->icsk_retransmits,
2722 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2723 icsk->icsk_probes_out,
2724 sock_i_ino(sk),
2725 atomic_read(&sk->sk_refcnt), sk,
2726 jiffies_to_clock_t(icsk->icsk_rto),
2727 jiffies_to_clock_t(icsk->icsk_ack.ato),
2728 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2729 tp->snd_cwnd,
2730 sk->sk_state == TCP_LISTEN ?
2731 (fastopenq ? fastopenq->max_qlen : 0) :
2732 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2733 len);
2734 }
2735
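/* Format one /proc/net/tcp line for a TIME_WAIT socket. */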
2736 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2737 struct seq_file *f, int i, int *len)
2738 {
2739 __be32 dest, src;
2740 __u16 destp, srcp;
2741 long delta = tw->tw_ttd - jiffies;
2742
2743 dest = tw->tw_daddr;
2744 src = tw->tw_rcv_saddr;
2745 destp = ntohs(tw->tw_dport);
2746 srcp = ntohs(tw->tw_sport);
2747
2748 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2749 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2750 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2751 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2752 atomic_read(&tw->tw_refcnt), tw, len);
2753 }
2754
2755 #define TMPSZ 150
2756
2757 static int tcp4_seq_show(struct seq_file *seq, void *v)
2758 {
2759 struct tcp_iter_state *st;
2760 int len;
2761
2762 if (v == SEQ_START_TOKEN) {
2763 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2764 " sl local_address rem_address st tx_queue "
2765 "rx_queue tr tm->when retrnsmt uid timeout "
2766 "inode");
2767 goto out;
2768 }
2769 st = seq->private;
2770
2771 switch (st->state) {
2772 case TCP_SEQ_STATE_LISTENING:
2773 case TCP_SEQ_STATE_ESTABLISHED:
2774 get_tcp4_sock(v, seq, st->num, &len);
2775 break;
2776 case TCP_SEQ_STATE_OPENREQ:
2777 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2778 break;
2779 case TCP_SEQ_STATE_TIME_WAIT:
2780 get_timewait4_sock(v, seq, st->num, &len);
2781 break;
2782 }
2783 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2784 out:
2785 return 0;
2786 }
2787
2788 static const struct file_operations tcp_afinfo_seq_fops = {
2789 .owner = THIS_MODULE,
2790 .open = tcp_seq_open,
2791 .read = seq_read,
2792 .llseek = seq_lseek,
2793 .release = seq_release_net
2794 };
2795
2796 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2797 .name = "tcp",
2798 .family = AF_INET,
2799 .seq_fops = &tcp_afinfo_seq_fops,
2800 .seq_ops = {
2801 .show = tcp4_seq_show,
2802 },
2803 };
2804
2805 static int __net_init tcp4_proc_init_net(struct net *net)
2806 {
2807 return tcp_proc_register(net, &tcp4_seq_afinfo);
2808 }
2809
2810 static void __net_exit tcp4_proc_exit_net(struct net *net)
2811 {
2812 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2813 }
2814
2815 static struct pernet_operations tcp4_net_ops = {
2816 .init = tcp4_proc_init_net,
2817 .exit = tcp4_proc_exit_net,
2818 };
2819
2820 int __init tcp4_proc_init(void)
2821 {
2822 return register_pernet_subsys(&tcp4_net_ops);
2823 }
2824
2825 void tcp4_proc_exit(void)
2826 {
2827 unregister_pernet_subsys(&tcp4_net_ops);
2828 }
2829 #endif /* CONFIG_PROC_FS */
2830
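/* GRO receive handler for IPv4 TCP: verify (or, for CHECKSUM_NONE,
 * compute) the checksum over the GRO region, then let the generic
 * tcp_gro_receive() try to coalesce the segment; packets failing the
 * check are flagged for flush.
 */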
2831 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2832 {
2833 const struct iphdr *iph = skb_gro_network_header(skb);
2834 __wsum wsum;
2835 __sum16 sum;
2836
2837 switch (skb->ip_summed) {
2838 case CHECKSUM_COMPLETE:
2839 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2840 skb->csum)) {
2841 skb->ip_summed = CHECKSUM_UNNECESSARY;
2842 break;
2843 }
2844 flush:
2845 NAPI_GRO_CB(skb)->flush = 1;
2846 return NULL;
2847
2848 case CHECKSUM_NONE:
2849 wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
2850 skb_gro_len(skb), IPPROTO_TCP, 0);
2851 sum = csum_fold(skb_checksum(skb,
2852 skb_gro_offset(skb),
2853 skb_gro_len(skb),
2854 wsum));
2855 if (sum)
2856 goto flush;
2857
2858 skb->ip_summed = CHECKSUM_UNNECESSARY;
2859 break;
2860 }
2861
2862 return tcp_gro_receive(head, skb);
2863 }
2864
2865 int tcp4_gro_complete(struct sk_buff *skb)
2866 {
2867 const struct iphdr *iph = ip_hdr(skb);
2868 struct tcphdr *th = tcp_hdr(skb);
2869
2870 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2871 iph->saddr, iph->daddr, 0);
2872 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2873
2874 return tcp_gro_complete(skb);
2875 }
2876
2877 struct proto tcp_prot = {
2878 .name = "TCP",
2879 .owner = THIS_MODULE,
2880 .close = tcp_close,
2881 .connect = tcp_v4_connect,
2882 .disconnect = tcp_disconnect,
2883 .accept = inet_csk_accept,
2884 .ioctl = tcp_ioctl,
2885 .init = tcp_v4_init_sock,
2886 .destroy = tcp_v4_destroy_sock,
2887 .shutdown = tcp_shutdown,
2888 .setsockopt = tcp_setsockopt,
2889 .getsockopt = tcp_getsockopt,
2890 .recvmsg = tcp_recvmsg,
2891 .sendmsg = tcp_sendmsg,
2892 .sendpage = tcp_sendpage,
2893 .backlog_rcv = tcp_v4_do_rcv,
2894 .release_cb = tcp_release_cb,
2895 .hash = inet_hash,
2896 .unhash = inet_unhash,
2897 .get_port = inet_csk_get_port,
2898 .enter_memory_pressure = tcp_enter_memory_pressure,
2899 .sockets_allocated = &tcp_sockets_allocated,
2900 .orphan_count = &tcp_orphan_count,
2901 .memory_allocated = &tcp_memory_allocated,
2902 .memory_pressure = &tcp_memory_pressure,
2903 .sysctl_wmem = sysctl_tcp_wmem,
2904 .sysctl_rmem = sysctl_tcp_rmem,
2905 .max_header = MAX_TCP_HEADER,
2906 .obj_size = sizeof(struct tcp_sock),
2907 .slab_flags = SLAB_DESTROY_BY_RCU,
2908 .twsk_prot = &tcp_timewait_sock_ops,
2909 .rsk_prot = &tcp_request_sock_ops,
2910 .h.hashinfo = &tcp_hashinfo,
2911 .no_autobind = true,
2912 #ifdef CONFIG_COMPAT
2913 .compat_setsockopt = compat_tcp_setsockopt,
2914 .compat_getsockopt = compat_tcp_getsockopt,
2915 #endif
2916 #ifdef CONFIG_MEMCG_KMEM
2917 .init_cgroup = tcp_init_cgroup,
2918 .destroy_cgroup = tcp_destroy_cgroup,
2919 .proto_cgroup = tcp_proto_cgroup,
2920 #endif
2921 };
2922 EXPORT_SYMBOL(tcp_prot);
2923
2924 static void __net_exit tcp_sk_exit(struct net *net)
2925 {
2926 int cpu;
2927
2928 for_each_possible_cpu(cpu)
2929 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2930 free_percpu(net->ipv4.tcp_sk);
2931 }
2932
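/* Per-namespace setup: create one kernel control socket per possible
 * CPU, used when TCP must send replies (RSTs, ACKs) that are not
 * associated with a full socket, and initialize the namespace default
 * for the ECN sysctl.
 */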
2933 static int __net_init tcp_sk_init(struct net *net)
2934 {
2935 int res, cpu;
2936
2937 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2938 if (!net->ipv4.tcp_sk)
2939 return -ENOMEM;
2940
2941 for_each_possible_cpu(cpu) {
2942 struct sock *sk;
2943
2944 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2945 IPPROTO_TCP, net);
2946 if (res)
2947 goto fail;
2948 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2949 }
2950 net->ipv4.sysctl_tcp_ecn = 2;
2951 return 0;
2952
2953 fail:
2954 tcp_sk_exit(net);
2955
2956 return res;
2957 }
2958
2959 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2960 {
2961 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2962 }
2963
2964 static struct pernet_operations __net_initdata tcp_sk_ops = {
2965 .init = tcp_sk_init,
2966 .exit = tcp_sk_exit,
2967 .exit_batch = tcp_sk_exit_batch,
2968 };
2969
2970 void __init tcp_v4_init(void)
2971 {
2972 inet_hashinfo_init(&tcp_hashinfo);
2973 if (register_pernet_subsys(&tcp_sk_ops))
2974 panic("Failed to create the TCP control socket.\n");
2975 }