1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24 /*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
97
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
100
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 {
103 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 ip_hdr(skb)->saddr,
105 tcp_hdr(skb)->dest,
106 tcp_hdr(skb)->source);
107 }
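
/*
 * The ISN above follows the RFC 6528 construction (sketch only; the exact
 * hashing lives in secure_tcp_sequence_number()):
 *
 *	ISN = M + F(saddr, daddr, sport, dport, secret)
 *
 * where M is a clock-driven counter and F is a keyed hash of the connection
 * 4-tuple, so initial sequence numbers are hard to predict off-path while
 * still advancing monotonically for a given address/port pair.
 */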
108
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
113
114 /* With PAWS, it is safe from the viewpoint
115 of data integrity. Even without PAWS it is safe provided sequence
116 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117
118 Actually, the idea is close to VJ's one, only timestamp cache is
119 held not per host, but per port pair and TW bucket is used as state
120 holder.
121
122 If TW bucket has been already destroyed we fall back to VJ's scheme
123 and use initial timestamp retrieved from peer table.
124 */
125 if (tcptw->tw_ts_recent_stamp &&
126 (twp == NULL || (sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
130 tp->write_seq = 1;
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 sock_hold(sktw);
134 return 1;
135 }
136
137 return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
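
/*
 * A worked reading of the reuse test above (numbers illustrative only):
 * with sysctl_tcp_tw_reuse set (/proc/sys/net/ipv4/tcp_tw_reuse) and a
 * TIME-WAIT entry whose timestamp is at least two seconds old, e.g.
 *
 *	get_seconds() = 1000, tw_ts_recent_stamp = 997  =>  1000 - 997 > 1
 *
 * the bucket may be reused; write_seq then starts at tw_snd_nxt + 65535 + 2,
 * comfortably ahead of anything the old connection could still have in
 * flight.
 */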
140
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 {
144 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145 struct inet_sock *inet = inet_sk(sk);
146 struct tcp_sock *tp = tcp_sk(sk);
147 __be16 orig_sport, orig_dport;
148 __be32 daddr, nexthop;
149 struct flowi4 *fl4;
150 struct rtable *rt;
151 int err;
152 struct ip_options_rcu *inet_opt;
153
154 if (addr_len < sizeof(struct sockaddr_in))
155 return -EINVAL;
156
157 if (usin->sin_family != AF_INET)
158 return -EAFNOSUPPORT;
159
160 nexthop = daddr = usin->sin_addr.s_addr;
161 inet_opt = rcu_dereference_protected(inet->inet_opt,
162 sock_owned_by_user(sk));
163 if (inet_opt && inet_opt->opt.srr) {
164 if (!daddr)
165 return -EINVAL;
166 nexthop = inet_opt->opt.faddr;
167 }
168
169 orig_sport = inet->inet_sport;
170 orig_dport = usin->sin_port;
171 fl4 = &inet->cork.fl.u.ip4;
172 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 IPPROTO_TCP,
175 orig_sport, orig_dport, sk, true);
176 if (IS_ERR(rt)) {
177 err = PTR_ERR(rt);
178 if (err == -ENETUNREACH)
179 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180 return err;
181 }
182
183 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
184 ip_rt_put(rt);
185 return -ENETUNREACH;
186 }
187
188 if (!inet_opt || !inet_opt->opt.srr)
189 daddr = fl4->daddr;
190
191 if (!inet->inet_saddr)
192 inet->inet_saddr = fl4->saddr;
193 inet->inet_rcv_saddr = inet->inet_saddr;
194
195 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
196 /* Reset inherited state */
197 tp->rx_opt.ts_recent = 0;
198 tp->rx_opt.ts_recent_stamp = 0;
199 if (likely(!tp->repair))
200 tp->write_seq = 0;
201 }
202
203 if (tcp_death_row.sysctl_tw_recycle &&
204 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
205 tcp_fetch_timewait_stamp(sk, &rt->dst);
206
207 inet->inet_dport = usin->sin_port;
208 inet->inet_daddr = daddr;
209
210 inet_csk(sk)->icsk_ext_hdr_len = 0;
211 if (inet_opt)
212 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
213
214 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
215
216 /* Socket identity is still unknown (sport may be zero).
217 * However we set state to SYN-SENT and, without releasing the socket
218 * lock, select a source port, enter ourselves into the hash tables and
219 * complete initialization after this.
220 */
221 tcp_set_state(sk, TCP_SYN_SENT);
222 err = inet_hash_connect(&tcp_death_row, sk);
223 if (err)
224 goto failure;
225
226 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
227 inet->inet_sport, inet->inet_dport, sk);
228 if (IS_ERR(rt)) {
229 err = PTR_ERR(rt);
230 rt = NULL;
231 goto failure;
232 }
233 /* OK, now commit destination to socket. */
234 sk->sk_gso_type = SKB_GSO_TCPV4;
235 sk_setup_caps(sk, &rt->dst);
236 printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
237 if (!tp->write_seq && likely(!tp->repair))
238 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
239 inet->inet_daddr,
240 inet->inet_sport,
241 usin->sin_port);
242
243 inet->inet_id = tp->write_seq ^ jiffies;
244
245 err = tcp_connect(sk);
246
247 rt = NULL;
248 if (err)
249 goto failure;
250
251 return 0;
252
253 failure:
254 /*
255 * This unhashes the socket and releases the local port,
256 * if necessary.
257 */
258 tcp_set_state(sk, TCP_CLOSE);
259 ip_rt_put(rt);
260 sk->sk_route_caps = 0;
261 inet->inet_dport = 0;
262 return err;
263 }
264 EXPORT_SYMBOL(tcp_v4_connect);
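
/*
 * For reference, a minimal user-space sketch of the call that lands in
 * tcp_v4_connect() (standard BSD sockets API assumed; error handling
 * omitted):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));	/* -> tcp_v4_connect() */
 */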
265
266 /*
267 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
268 * It can be called through tcp_release_cb() if socket was owned by user
269 * at the time tcp_v4_err() was called to handle ICMP message.
270 */
271 void tcp_v4_mtu_reduced(struct sock *sk)
272 {
273 struct inet_sock *inet = inet_sk(sk);
274 struct dst_entry *dst;
275 u32 mtu;
276
277 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
278 return;
279 mtu = tcp_sk(sk)->mtu_info;
280 dst = inet_csk_update_pmtu(sk, mtu);
281 if (!dst)
282 return;
283
284 /* Something is about to go wrong... Remember the soft error
285 * in case this connection will not be able to recover.
286 */
287 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
288 sk->sk_err_soft = EMSGSIZE;
289
290 mtu = dst_mtu(dst);
291
292 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
293 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
294 tcp_sync_mss(sk, mtu);
295
296 /* Resend the TCP packet because it's
297 * clear that the old packet has been
298 * dropped. This is the new "fast" path mtu
299 * discovery.
300 */
301 tcp_simple_retransmit(sk);
302 } /* else let the usual retransmit timer handle it */
303 }
304 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
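
/*
 * The pmtudisc test above corresponds to the IP_MTU_DISCOVER socket option.
 * A minimal user-space sketch that opts a socket out of path MTU discovery,
 * so the tcp_sync_mss() branch above is not taken (illustrative only):
 *
 *	int val = IP_PMTUDISC_DONT;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
 */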
305
306 static void do_redirect(struct sk_buff *skb, struct sock *sk)
307 {
308 struct dst_entry *dst = __sk_dst_check(sk, 0);
309
310 if (dst)
311 dst->ops->redirect(dst, sk, skb);
312 }
313
314 /*
315 * This routine is called by the ICMP module when it gets some
316 * sort of error condition. If err < 0 then the socket should
317 * be closed and the error returned to the user. If err > 0
318 * it's just the icmp type << 8 | icmp code. After adjustment
319 * header points to the first 8 bytes of the tcp header. We need
320 * to find the appropriate port.
321 *
322 * The locking strategy used here is very "optimistic". When
323 * someone else accesses the socket the ICMP is just dropped
324 * and for some paths there is no check at all.
325 * A more general error queue to queue errors for later handling
326 * is probably better.
327 *
328 */
329
330 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
331 {
332 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
333 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
334 struct inet_connection_sock *icsk;
335 struct tcp_sock *tp;
336 struct inet_sock *inet;
337 const int type = icmp_hdr(icmp_skb)->type;
338 const int code = icmp_hdr(icmp_skb)->code;
339 struct sock *sk;
340 struct sk_buff *skb;
341 struct request_sock *req;
342 __u32 seq;
343 __u32 remaining;
344 int err;
345 struct net *net = dev_net(icmp_skb->dev);
346
347 if (icmp_skb->len < (iph->ihl << 2) + 8) {
348 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
349 return;
350 }
351
352 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
353 iph->saddr, th->source, inet_iif(icmp_skb));
354 if (!sk) {
355 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
356 return;
357 }
358 if (sk->sk_state == TCP_TIME_WAIT) {
359 inet_twsk_put(inet_twsk(sk));
360 return;
361 }
362
363 bh_lock_sock(sk);
364 /* If too many ICMPs get dropped on busy
365 * servers this needs to be solved differently.
366 * We do take care of PMTU discovery (RFC1191) special case :
367 * we can receive locally generated ICMP messages while socket is held.
368 */
369 if (sock_owned_by_user(sk)) {
370 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
371 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
372 }
373 if (sk->sk_state == TCP_CLOSE)
374 goto out;
375
376 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
377 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
378 goto out;
379 }
380
381 icsk = inet_csk(sk);
382 tp = tcp_sk(sk);
383 req = tp->fastopen_rsk;
384 seq = ntohl(th->seq);
385 if (sk->sk_state != TCP_LISTEN &&
386 !between(seq, tp->snd_una, tp->snd_nxt) &&
387 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
388 /* For a Fast Open socket, allow seq to be snt_isn. */
389 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
390 goto out;
391 }
392
393 switch (type) {
394 case ICMP_REDIRECT:
395 if (!sock_owned_by_user(sk))
396 do_redirect(icmp_skb, sk);
397 goto out;
398 case ICMP_SOURCE_QUENCH:
399 /* Just silently ignore these. */
400 goto out;
401 case ICMP_PARAMETERPROB:
402 err = EPROTO;
403 break;
404 case ICMP_DEST_UNREACH:
405 if (code > NR_ICMP_UNREACH)
406 goto out;
407
408 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
409 /* We are not interested in TCP_LISTEN and open_requests
410 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
411 * they should go through unfragmented).
412 */
413 if (sk->sk_state == TCP_LISTEN)
414 goto out;
415
416 tp->mtu_info = info;
417 if (!sock_owned_by_user(sk)) {
418 tcp_v4_mtu_reduced(sk);
419 } else {
420 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
421 sock_hold(sk);
422 }
423 goto out;
424 }
425
426 err = icmp_err_convert[code].errno;
427 /* check if icmp_skb allows revert of backoff
428 * (see draft-zimmermann-tcp-lcd) */
429 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
430 break;
431 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
432 !icsk->icsk_backoff)
433 break;
434
435 /* XXX (TFO) - revisit the following logic for TFO */
436
437 if (sock_owned_by_user(sk))
438 break;
439
440 icsk->icsk_backoff--;
441 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
442 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
443 tcp_bound_rto(sk);
444
445 skb = tcp_write_queue_head(sk);
446 BUG_ON(!skb);
447
448 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
449 tcp_time_stamp - TCP_SKB_CB(skb)->when);
450
451 if (remaining) {
452 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
453 remaining, sysctl_tcp_rto_max);
454 } else {
455 /* RTO revert clocked out retransmission.
456 * Will retransmit now */
457 tcp_retransmit_timer(sk);
458 }
459
460 break;
461 case ICMP_TIME_EXCEEDED:
462 err = EHOSTUNREACH;
463 break;
464 default:
465 goto out;
466 }
467
468 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
469 * than following the TCP_SYN_RECV case and closing the socket,
470 * we ignore the ICMP error and keep trying like a fully established
471 * socket. Is this the right thing to do?
472 */
473 if (req && req->sk == NULL)
474 goto out;
475
476 switch (sk->sk_state) {
477 struct request_sock *req, **prev;
478 case TCP_LISTEN:
479 if (sock_owned_by_user(sk))
480 goto out;
481
482 req = inet_csk_search_req(sk, &prev, th->dest,
483 iph->daddr, iph->saddr);
484 if (!req)
485 goto out;
486
487 /* ICMPs are not backlogged, hence we cannot get
488 an established socket here.
489 */
490 WARN_ON(req->sk);
491
492 if (seq != tcp_rsk(req)->snt_isn) {
493 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
494 goto out;
495 }
496
497 /*
498 * Still in SYN_RECV, just remove it silently.
499 * There is no good way to pass the error to the newly
500 * created socket, and POSIX does not want network
501 * errors returned from accept().
502 */
503 inet_csk_reqsk_queue_drop(sk, req, prev);
504 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
505 goto out;
506
507 case TCP_SYN_SENT:
508 case TCP_SYN_RECV: /* Cannot normally happen.
509 It can, e.g., if SYNs crossed,
510 or with Fast Open.
511 */
512 if (!sock_owned_by_user(sk)) {
513 sk->sk_err = err;
514
515 sk->sk_error_report(sk);
516
517 tcp_done(sk);
518 } else {
519 sk->sk_err_soft = err;
520 }
521 goto out;
522 }
523
524 /* If we've already connected we will keep trying
525 * until we time out, or the user gives up.
526 *
527 * rfc1122 4.2.3.9 allows considering as hard errors
528 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
529 * but it is obsoleted by pmtu discovery).
530 *
531 * Note that in the modern internet, where routing is unreliable
532 * and broken firewalls sit in every dark corner, sending random
533 * errors ordered by their masters, even these two messages finally lose
534 * their original sense (even Linux sends invalid PORT_UNREACHs).
535 *
536 * Now we are in compliance with RFCs.
537 * --ANK (980905)
538 */
539
540 inet = inet_sk(sk);
541 if (!sock_owned_by_user(sk) && inet->recverr) {
542 sk->sk_err = err;
543 sk->sk_error_report(sk);
544 } else { /* Only an error on timeout */
545 sk->sk_err_soft = err;
546 }
547
548 out:
549 bh_unlock_sock(sk);
550 sock_put(sk);
551 }
552
553 static void __tcp_v4_send_check(struct sk_buff *skb,
554 __be32 saddr, __be32 daddr)
555 {
556 struct tcphdr *th = tcp_hdr(skb);
557
558 if (skb->ip_summed == CHECKSUM_PARTIAL) {
559 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
560 skb->csum_start = skb_transport_header(skb) - skb->head;
561 skb->csum_offset = offsetof(struct tcphdr, check);
562 } else {
563 th->check = tcp_v4_check(skb->len, saddr, daddr,
564 csum_partial(th,
565 th->doff << 2,
566 skb->csum));
567 }
568 }
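
/*
 * For reference, the IPv4 pseudo-header folded into the checksum by
 * tcp_v4_check() above (layout per RFC 793; shown only as an aid):
 *
 *	 0      7 8     15 16    23 24    31
 *	+--------+--------+--------+--------+
 *	|           source address          |
 *	+--------+--------+--------+--------+
 *	|         destination address       |
 *	+--------+--------+--------+--------+
 *	|  zero  |  PTCL  |    TCP length   |
 *	+--------+--------+--------+--------+
 */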
569
570 /* This routine computes an IPv4 TCP checksum. */
571 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
572 {
573 const struct inet_sock *inet = inet_sk(sk);
574
575 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
576 }
577 EXPORT_SYMBOL(tcp_v4_send_check);
578
579 int tcp_v4_gso_send_check(struct sk_buff *skb)
580 {
581 const struct iphdr *iph;
582 struct tcphdr *th;
583
584 if (!pskb_may_pull(skb, sizeof(*th)))
585 return -EINVAL;
586
587 iph = ip_hdr(skb);
588 th = tcp_hdr(skb);
589
590 th->check = 0;
591 skb->ip_summed = CHECKSUM_PARTIAL;
592 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
593 return 0;
594 }
595
596 /*
597 * This routine will send an RST to the other tcp.
598 *
599 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
600 * for reset.
601 * Answer: if a packet caused RST, it is not for a socket
602 * existing in our system; if it is matched to a socket,
603 * it is just a duplicate segment or a bug in the other side's TCP.
604 * So we build the reply based only on the parameters
605 * that arrived with the segment.
606 * Exception: precedence violation. We do not implement it in any case.
607 */
608
609 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
610 {
611 const struct tcphdr *th = tcp_hdr(skb);
612 struct {
613 struct tcphdr th;
614 #ifdef CONFIG_TCP_MD5SIG
615 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
616 #endif
617 } rep;
618 struct ip_reply_arg arg;
619 #ifdef CONFIG_TCP_MD5SIG
620 struct tcp_md5sig_key *key;
621 const __u8 *hash_location = NULL;
622 unsigned char newhash[16];
623 int genhash;
624 struct sock *sk1 = NULL;
625 #endif
626 struct net *net;
627
628 /* Never send a reset in response to a reset. */
629 if (th->rst)
630 return;
631
632 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
633 return;
634
635 /* Swap the send and the receive. */
636 memset(&rep, 0, sizeof(rep));
637 rep.th.dest = th->source;
638 rep.th.source = th->dest;
639 rep.th.doff = sizeof(struct tcphdr) / 4;
640 rep.th.rst = 1;
641
642 if (th->ack) {
643 rep.th.seq = th->ack_seq;
644 } else {
645 rep.th.ack = 1;
646 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
647 skb->len - (th->doff << 2));
648 }
649
650 memset(&arg, 0, sizeof(arg));
651 arg.iov[0].iov_base = (unsigned char *)&rep;
652 arg.iov[0].iov_len = sizeof(rep.th);
653
654 #ifdef CONFIG_TCP_MD5SIG
655 hash_location = tcp_parse_md5sig_option(th);
656 if (!sk && hash_location) {
657 /*
658 * active side is lost. Try to find the listening socket through the
659 * source port, and then find the md5 key through the listening socket.
660 * We do not loosen security here:
661 * the incoming packet is checked against the md5 hash of the found key;
662 * no RST is generated if the md5 hash doesn't match.
663 */
664 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
665 &tcp_hashinfo, ip_hdr(skb)->saddr,
666 th->source, ip_hdr(skb)->daddr,
667 ntohs(th->source), inet_iif(skb));
668 /* don't send rst if it can't find key */
669 if (!sk1)
670 return;
671 rcu_read_lock();
672 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
673 &ip_hdr(skb)->saddr, AF_INET);
674 if (!key)
675 goto release_sk1;
676
677 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
678 if (genhash || memcmp(hash_location, newhash, 16) != 0)
679 goto release_sk1;
680 } else {
681 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
682 &ip_hdr(skb)->saddr,
683 AF_INET) : NULL;
684 }
685
686 if (key) {
687 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
688 (TCPOPT_NOP << 16) |
689 (TCPOPT_MD5SIG << 8) |
690 TCPOLEN_MD5SIG);
691 /* Update length and the length the header thinks exists */
692 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
693 rep.th.doff = arg.iov[0].iov_len / 4;
694
695 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
696 key, ip_hdr(skb)->saddr,
697 ip_hdr(skb)->daddr, &rep.th);
698 }
699 #endif
700 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
701 ip_hdr(skb)->saddr, /* XXX */
702 arg.iov[0].iov_len, IPPROTO_TCP, 0);
703 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
704 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
705 /* When the socket is gone, all binding information is lost and
706 * routing might fail in this case. No choice here: if we choose to force
707 * the input interface, we will misroute in case of an asymmetric route.
708 */
709 if (sk)
710 arg.bound_dev_if = sk->sk_bound_dev_if;
711
712 net = dev_net(skb_dst(skb)->dev);
713 arg.tos = ip_hdr(skb)->tos;
714 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
715 skb, ip_hdr(skb)->saddr,
716 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
717
718 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
719 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
720
721 #ifdef CONFIG_TCP_MD5SIG
722 release_sk1:
723 if (sk1) {
724 rcu_read_unlock();
725 sock_put(sk1);
726 }
727 #endif
728 }
729
730 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
731 outside of socket context, is ugly, certainly. What can I do?
732 */
733
734 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
735 u32 win, u32 tsval, u32 tsecr, int oif,
736 struct tcp_md5sig_key *key,
737 int reply_flags, u8 tos)
738 {
739 const struct tcphdr *th = tcp_hdr(skb);
740 struct {
741 struct tcphdr th;
742 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
743 #ifdef CONFIG_TCP_MD5SIG
744 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
745 #endif
746 ];
747 } rep;
748 struct ip_reply_arg arg;
749 struct net *net = dev_net(skb_dst(skb)->dev);
750
751 memset(&rep.th, 0, sizeof(struct tcphdr));
752 memset(&arg, 0, sizeof(arg));
753
754 arg.iov[0].iov_base = (unsigned char *)&rep;
755 arg.iov[0].iov_len = sizeof(rep.th);
756 if (tsecr) {
757 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
758 (TCPOPT_TIMESTAMP << 8) |
759 TCPOLEN_TIMESTAMP);
760 rep.opt[1] = htonl(tsval);
761 rep.opt[2] = htonl(tsecr);
762 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
763 }
764
765 /* Swap the send and the receive. */
766 rep.th.dest = th->source;
767 rep.th.source = th->dest;
768 rep.th.doff = arg.iov[0].iov_len / 4;
769 rep.th.seq = htonl(seq);
770 rep.th.ack_seq = htonl(ack);
771 rep.th.ack = 1;
772 rep.th.window = htons(win);
773
774 #ifdef CONFIG_TCP_MD5SIG
775 if (key) {
776 int offset = (tsecr) ? 3 : 0;
777
778 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
779 (TCPOPT_NOP << 16) |
780 (TCPOPT_MD5SIG << 8) |
781 TCPOLEN_MD5SIG);
782 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
783 rep.th.doff = arg.iov[0].iov_len/4;
784
785 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
786 key, ip_hdr(skb)->saddr,
787 ip_hdr(skb)->daddr, &rep.th);
788 }
789 #endif
790 arg.flags = reply_flags;
791 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
792 ip_hdr(skb)->saddr, /* XXX */
793 arg.iov[0].iov_len, IPPROTO_TCP, 0);
794 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
795 if (oif)
796 arg.bound_dev_if = oif;
797 arg.tos = tos;
798 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
799 skb, ip_hdr(skb)->saddr,
800 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
801
802 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
803 }
804
805 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
806 {
807 struct inet_timewait_sock *tw = inet_twsk(sk);
808 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
809
810 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
811 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
812 tcp_time_stamp + tcptw->tw_ts_offset,
813 tcptw->tw_ts_recent,
814 tw->tw_bound_dev_if,
815 tcp_twsk_md5_key(tcptw),
816 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
817 tw->tw_tos
818 );
819
820 inet_twsk_put(tw);
821 }
822
823 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
824 struct request_sock *req)
825 {
826 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
827 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
828 */
829 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
830 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
831 tcp_rsk(req)->rcv_nxt,
832 req->rcv_wnd >> inet_rsk(req)->rcv_wscale,
833 tcp_time_stamp,
834 req->ts_recent,
835 0,
836 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
837 AF_INET),
838 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
839 ip_hdr(skb)->tos);
840 }
841
842 /*
843 * Send a SYN-ACK after having received a SYN.
844 * This still operates on a request_sock only, not on a big
845 * socket.
846 */
847 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
848 struct request_sock *req,
849 u16 queue_mapping,
850 bool nocache)
851 {
852 const struct inet_request_sock *ireq = inet_rsk(req);
853 struct flowi4 fl4;
854 int err = -1;
855 struct sk_buff *skb;
856
857 /* First, grab a route. */
858 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
859 return -1;
860
861 skb = tcp_make_synack(sk, dst, req, NULL);
862
863 if (skb) {
864 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
865
866 skb_set_queue_mapping(skb, queue_mapping);
867 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
868 ireq->rmt_addr,
869 ireq->opt);
870 err = net_xmit_eval(err);
871 if (!tcp_rsk(req)->snt_synack && !err)
872 tcp_rsk(req)->snt_synack = tcp_time_stamp;
873 }
874
875 return err;
876 }
877
878 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
879 {
880 int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
881
882 if (!res)
883 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
884 return res;
885 }
886
887 /*
888 * IPv4 request_sock destructor.
889 */
890 static void tcp_v4_reqsk_destructor(struct request_sock *req)
891 {
892 kfree(inet_rsk(req)->opt);
893 }
894
895 /*
896 * Return true if a syncookie should be sent
897 */
898 bool tcp_syn_flood_action(struct sock *sk,
899 const struct sk_buff *skb,
900 const char *proto)
901 {
902 const char *msg = "Dropping request";
903 bool want_cookie = false;
904 struct listen_sock *lopt;
905
906
907
908 #ifdef CONFIG_SYN_COOKIES
909 if (sysctl_tcp_syncookies) {
910 msg = "Sending cookies";
911 want_cookie = true;
912 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
913 } else
914 #endif
915 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
916
917 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
918 if (!lopt->synflood_warned) {
919 lopt->synflood_warned = 1;
920 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
921 proto, ntohs(tcp_hdr(skb)->dest), msg);
922 }
923 return want_cookie;
924 }
925 EXPORT_SYMBOL(tcp_syn_flood_action);
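
/*
 * The want_cookie decision above follows sysctl_tcp_syncookies, exposed as
 * /proc/sys/net/ipv4/tcp_syncookies.  Minimal sketch of enabling it from
 * user space (illustrative only; error handling omitted):
 *
 *	int fd = open("/proc/sys/net/ipv4/tcp_syncookies", O_WRONLY);
 *
 *	write(fd, "1", 1);
 *	close(fd);
 */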
926
927 /*
928 * Save and compile IPv4 options into the request_sock if needed.
929 */
930 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
931 {
932 const struct ip_options *opt = &(IPCB(skb)->opt);
933 struct ip_options_rcu *dopt = NULL;
934
935 if (opt && opt->optlen) {
936 int opt_size = sizeof(*dopt) + opt->optlen;
937
938 dopt = kmalloc(opt_size, GFP_ATOMIC);
939 if (dopt) {
940 if (ip_options_echo(&dopt->opt, skb)) {
941 kfree(dopt);
942 dopt = NULL;
943 }
944 }
945 }
946 return dopt;
947 }
948
949 #ifdef CONFIG_TCP_MD5SIG
950 /*
951 * RFC2385 MD5 checksumming requires a mapping of
952 * IP address->MD5 Key.
953 * We need to maintain these in the sk structure.
954 */
955
956 /* Find the Key structure for an address. */
957 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
958 const union tcp_md5_addr *addr,
959 int family)
960 {
961 struct tcp_sock *tp = tcp_sk(sk);
962 struct tcp_md5sig_key *key;
963 unsigned int size = sizeof(struct in_addr);
964 struct tcp_md5sig_info *md5sig;
965
966 /* caller either holds rcu_read_lock() or socket lock */
967 md5sig = rcu_dereference_check(tp->md5sig_info,
968 sock_owned_by_user(sk) ||
969 lockdep_is_held(&sk->sk_lock.slock));
970 if (!md5sig)
971 return NULL;
972 #if IS_ENABLED(CONFIG_IPV6)
973 if (family == AF_INET6)
974 size = sizeof(struct in6_addr);
975 #endif
976 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
977 if (key->family != family)
978 continue;
979 if (!memcmp(&key->addr, addr, size))
980 return key;
981 }
982 return NULL;
983 }
984 EXPORT_SYMBOL(tcp_md5_do_lookup);
985
986 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
987 struct sock *addr_sk)
988 {
989 union tcp_md5_addr *addr;
990
991 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
992 return tcp_md5_do_lookup(sk, addr, AF_INET);
993 }
994 EXPORT_SYMBOL(tcp_v4_md5_lookup);
995
996 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
997 struct request_sock *req)
998 {
999 union tcp_md5_addr *addr;
1000
1001 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
1002 return tcp_md5_do_lookup(sk, addr, AF_INET);
1003 }
1004
1005 /* This can be called on a newly created socket, from other files */
1006 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1007 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1008 {
1009 /* Add Key to the list */
1010 struct tcp_md5sig_key *key;
1011 struct tcp_sock *tp = tcp_sk(sk);
1012 struct tcp_md5sig_info *md5sig;
1013
1014 key = tcp_md5_do_lookup(sk, addr, family);
1015 if (key) {
1016 /* Pre-existing entry - just update that one. */
1017 memcpy(key->key, newkey, newkeylen);
1018 key->keylen = newkeylen;
1019 return 0;
1020 }
1021
1022 md5sig = rcu_dereference_protected(tp->md5sig_info,
1023 sock_owned_by_user(sk) ||
1024 lockdep_is_held(&sk->sk_lock.slock));
1025 if (!md5sig) {
1026 md5sig = kmalloc(sizeof(*md5sig), gfp);
1027 if (!md5sig)
1028 return -ENOMEM;
1029
1030 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1031 INIT_HLIST_HEAD(&md5sig->head);
1032 rcu_assign_pointer(tp->md5sig_info, md5sig);
1033 }
1034
1035 key = sock_kmalloc(sk, sizeof(*key), gfp);
1036 if (!key)
1037 return -ENOMEM;
1038 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1039 sock_kfree_s(sk, key, sizeof(*key));
1040 return -ENOMEM;
1041 }
1042
1043 memcpy(key->key, newkey, newkeylen);
1044 key->keylen = newkeylen;
1045 key->family = family;
1046 memcpy(&key->addr, addr,
1047 (family == AF_INET6) ? sizeof(struct in6_addr) :
1048 sizeof(struct in_addr));
1049 hlist_add_head_rcu(&key->node, &md5sig->head);
1050 return 0;
1051 }
1052 EXPORT_SYMBOL(tcp_md5_do_add);
1053
1054 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1055 {
1056 struct tcp_sock *tp = tcp_sk(sk);
1057 struct tcp_md5sig_key *key;
1058 struct tcp_md5sig_info *md5sig;
1059
1060 key = tcp_md5_do_lookup(sk, addr, family);
1061 if (!key)
1062 return -ENOENT;
1063 hlist_del_rcu(&key->node);
1064 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1065 kfree_rcu(key, rcu);
1066 md5sig = rcu_dereference_protected(tp->md5sig_info,
1067 sock_owned_by_user(sk));
1068 if (hlist_empty(&md5sig->head))
1069 tcp_free_md5sig_pool();
1070 return 0;
1071 }
1072 EXPORT_SYMBOL(tcp_md5_do_del);
1073
1074 static void tcp_clear_md5_list(struct sock *sk)
1075 {
1076 struct tcp_sock *tp = tcp_sk(sk);
1077 struct tcp_md5sig_key *key;
1078 struct hlist_node *n;
1079 struct tcp_md5sig_info *md5sig;
1080
1081 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1082
1083 if (!hlist_empty(&md5sig->head))
1084 tcp_free_md5sig_pool();
1085 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1086 hlist_del_rcu(&key->node);
1087 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1088 kfree_rcu(key, rcu);
1089 }
1090 }
1091
1092 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1093 int optlen)
1094 {
1095 struct tcp_md5sig cmd;
1096 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1097
1098 if (optlen < sizeof(cmd))
1099 return -EINVAL;
1100
1101 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1102 return -EFAULT;
1103
1104 if (sin->sin_family != AF_INET)
1105 return -EINVAL;
1106
1107 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1108 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1109 AF_INET);
1110
1111 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1112 return -EINVAL;
1113
1114 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1115 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1116 GFP_KERNEL);
1117 }
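
/*
 * User-space counterpart of the option parsed above: the TCP_MD5SIG socket
 * option (see tcp(7)).  A minimal sketch, values illustrative only:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */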
1118
1119 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1120 __be32 daddr, __be32 saddr, int nbytes)
1121 {
1122 struct tcp4_pseudohdr *bp;
1123 struct scatterlist sg;
1124
1125 bp = &hp->md5_blk.ip4;
1126
1127 /*
1128 * 1. the TCP pseudo-header (in the order: source IP address,
1129 * destination IP address, zero-padded protocol number, and
1130 * segment length)
1131 */
1132 bp->saddr = saddr;
1133 bp->daddr = daddr;
1134 bp->pad = 0;
1135 bp->protocol = IPPROTO_TCP;
1136 bp->len = cpu_to_be16(nbytes);
1137
1138 sg_init_one(&sg, bp, sizeof(*bp));
1139 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1140 }
1141
1142 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1143 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1144 {
1145 struct tcp_md5sig_pool *hp;
1146 struct hash_desc *desc;
1147
1148 hp = tcp_get_md5sig_pool();
1149 if (!hp)
1150 goto clear_hash_noput;
1151 desc = &hp->md5_desc;
1152
1153 if (crypto_hash_init(desc))
1154 goto clear_hash;
1155 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1156 goto clear_hash;
1157 if (tcp_md5_hash_header(hp, th))
1158 goto clear_hash;
1159 if (tcp_md5_hash_key(hp, key))
1160 goto clear_hash;
1161 if (crypto_hash_final(desc, md5_hash))
1162 goto clear_hash;
1163
1164 tcp_put_md5sig_pool();
1165 return 0;
1166
1167 clear_hash:
1168 tcp_put_md5sig_pool();
1169 clear_hash_noput:
1170 memset(md5_hash, 0, 16);
1171 return 1;
1172 }
1173
1174 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1175 const struct sock *sk, const struct request_sock *req,
1176 const struct sk_buff *skb)
1177 {
1178 struct tcp_md5sig_pool *hp;
1179 struct hash_desc *desc;
1180 const struct tcphdr *th = tcp_hdr(skb);
1181 __be32 saddr, daddr;
1182
1183 if (sk) {
1184 saddr = inet_sk(sk)->inet_saddr;
1185 daddr = inet_sk(sk)->inet_daddr;
1186 } else if (req) {
1187 saddr = inet_rsk(req)->loc_addr;
1188 daddr = inet_rsk(req)->rmt_addr;
1189 } else {
1190 const struct iphdr *iph = ip_hdr(skb);
1191 saddr = iph->saddr;
1192 daddr = iph->daddr;
1193 }
1194
1195 hp = tcp_get_md5sig_pool();
1196 if (!hp)
1197 goto clear_hash_noput;
1198 desc = &hp->md5_desc;
1199
1200 if (crypto_hash_init(desc))
1201 goto clear_hash;
1202
1203 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1204 goto clear_hash;
1205 if (tcp_md5_hash_header(hp, th))
1206 goto clear_hash;
1207 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1208 goto clear_hash;
1209 if (tcp_md5_hash_key(hp, key))
1210 goto clear_hash;
1211 if (crypto_hash_final(desc, md5_hash))
1212 goto clear_hash;
1213
1214 tcp_put_md5sig_pool();
1215 return 0;
1216
1217 clear_hash:
1218 tcp_put_md5sig_pool();
1219 clear_hash_noput:
1220 memset(md5_hash, 0, 16);
1221 return 1;
1222 }
1223 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1224
1225 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1226 {
1227 /*
1228 * This gets called for each TCP segment that arrives
1229 * so we want to be efficient.
1230 * We have 3 drop cases:
1231 * o No MD5 hash and one expected.
1232 * o MD5 hash and we're not expecting one.
1233 * o MD5 hash and it's wrong.
1234 */
1235 const __u8 *hash_location = NULL;
1236 struct tcp_md5sig_key *hash_expected;
1237 const struct iphdr *iph = ip_hdr(skb);
1238 const struct tcphdr *th = tcp_hdr(skb);
1239 int genhash;
1240 unsigned char newhash[16];
1241
1242 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1243 AF_INET);
1244 hash_location = tcp_parse_md5sig_option(th);
1245
1246 /* We've parsed the options - do we have a hash? */
1247 if (!hash_expected && !hash_location)
1248 return false;
1249
1250 if (hash_expected && !hash_location) {
1251 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1252 return true;
1253 }
1254
1255 if (!hash_expected && hash_location) {
1256 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1257 return true;
1258 }
1259
1260 /* Okay, so this is hash_expected and hash_location -
1261 * so we need to calculate the checksum.
1262 */
1263 genhash = tcp_v4_md5_hash_skb(newhash,
1264 hash_expected,
1265 NULL, NULL, skb);
1266
1267 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1268 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1269 &iph->saddr, ntohs(th->source),
1270 &iph->daddr, ntohs(th->dest),
1271 genhash ? " tcp_v4_calc_md5_hash failed"
1272 : "");
1273 return true;
1274 }
1275 return false;
1276 }
1277
1278 #endif
1279
1280 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1281 .family = PF_INET,
1282 .obj_size = sizeof(struct tcp_request_sock),
1283 .rtx_syn_ack = tcp_v4_rtx_synack,
1284 .send_ack = tcp_v4_reqsk_send_ack,
1285 .destructor = tcp_v4_reqsk_destructor,
1286 .send_reset = tcp_v4_send_reset,
1287 .syn_ack_timeout = tcp_syn_ack_timeout,
1288 };
1289
1290 #ifdef CONFIG_TCP_MD5SIG
1291 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1292 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1293 .calc_md5_hash = tcp_v4_md5_hash_skb,
1294 };
1295 #endif
1296
1297 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1298 struct request_sock *req,
1299 struct tcp_fastopen_cookie *foc,
1300 struct tcp_fastopen_cookie *valid_foc)
1301 {
1302 bool skip_cookie = false;
1303 struct fastopen_queue *fastopenq;
1304
1305 if (likely(!fastopen_cookie_present(foc))) {
1306 /* See include/net/tcp.h for the meaning of these knobs */
1307 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1308 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1309 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1310 skip_cookie = true; /* no cookie to validate */
1311 else
1312 return false;
1313 }
1314 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1315 /* A FO option is present; bump the counter. */
1316 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1317
1318 /* Make sure the listener has enabled fastopen, and we don't
1319 * exceed the max # of pending TFO requests allowed before trying
1320 * to validate the cookie, in order to avoid burning CPU cycles
1321 * unnecessarily.
1322 *
1323 * XXX (TFO) - The implication of checking the max_qlen before
1324 * processing a cookie request is that clients can't differentiate
1325 * between qlen overflow causing Fast Open to be disabled
1326 * temporarily vs a server not supporting Fast Open at all.
1327 */
1328 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1329 fastopenq == NULL || fastopenq->max_qlen == 0)
1330 return false;
1331
1332 if (fastopenq->qlen >= fastopenq->max_qlen) {
1333 struct request_sock *req1;
1334 spin_lock(&fastopenq->lock);
1335 req1 = fastopenq->rskq_rst_head;
1336 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1337 spin_unlock(&fastopenq->lock);
1338 NET_INC_STATS_BH(sock_net(sk),
1339 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1340 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1341 foc->len = -1;
1342 return false;
1343 }
1344 fastopenq->rskq_rst_head = req1->dl_next;
1345 fastopenq->qlen--;
1346 spin_unlock(&fastopenq->lock);
1347 reqsk_free(req1);
1348 }
1349 if (skip_cookie) {
1350 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1351 return true;
1352 }
1353 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1354 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1355 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1356 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1357 memcmp(&foc->val[0], &valid_foc->val[0],
1358 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1359 return false;
1360 valid_foc->len = -1;
1361 }
1362 /* Acknowledge the data received from the peer. */
1363 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1364 return true;
1365 } else if (foc->len == 0) { /* Client requesting a cookie */
1366 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1367 NET_INC_STATS_BH(sock_net(sk),
1368 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1369 } else {
1370 /* Client sent a cookie with wrong size. Treat it
1371 * the same as invalid and return a valid one.
1372 */
1373 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1374 }
1375 return false;
1376 }
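
/*
 * Server-side Fast Open, as checked above, is armed from user space before
 * listen() with the TCP_FASTOPEN socket option; the sysctl_tcp_fastopen bits
 * map to /proc/sys/net/ipv4/tcp_fastopen.  Minimal sketch (illustrative):
 *
 *	int qlen = 5;	/* cap on pending TFO requests, cf. max_qlen above */
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(fd, backlog);
 */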
1377
1378 static int tcp_v4_conn_req_fastopen(struct sock *sk,
1379 struct sk_buff *skb,
1380 struct sk_buff *skb_synack,
1381 struct request_sock *req)
1382 {
1383 struct tcp_sock *tp = tcp_sk(sk);
1384 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1385 const struct inet_request_sock *ireq = inet_rsk(req);
1386 struct sock *child;
1387 int err;
1388
1389 req->num_retrans = 0;
1390 req->num_timeout = 0;
1391 req->sk = NULL;
1392
1393 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1394 if (child == NULL) {
1395 NET_INC_STATS_BH(sock_net(sk),
1396 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1397 kfree_skb(skb_synack);
1398 return -1;
1399 }
1400 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1401 ireq->rmt_addr, ireq->opt);
1402 err = net_xmit_eval(err);
1403 if (!err)
1404 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1405 /* XXX (TFO) - is it ok to ignore error and continue? */
1406
1407 spin_lock(&queue->fastopenq->lock);
1408 queue->fastopenq->qlen++;
1409 spin_unlock(&queue->fastopenq->lock);
1410
1411 /* Initialize the child socket. Have to fix some values to take
1412 * into account that the child is a Fast Open socket and is created
1413 * only out of the bits carried in the SYN packet.
1414 */
1415 tp = tcp_sk(child);
1416
1417 tp->fastopen_rsk = req;
1418 /* Do a hold on the listener sk so that if the listener is being
1419 * closed, the child that has been accepted can live on and still
1420 * access listen_lock.
1421 */
1422 sock_hold(sk);
1423 tcp_rsk(req)->listener = sk;
1424
1425 /* RFC1323: The window in SYN & SYN/ACK segments is never
1426 * scaled. So correct it appropriately.
1427 */
1428 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1429 tp->max_window = tp->snd_wnd;
1430
1431 /* Activate the retrans timer so that SYNACK can be retransmitted.
1432 * The request socket is not added to the SYN table of the parent
1433 * because it's been added to the accept queue directly.
1434 */
1435 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1436 TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
1437
1438 /* Add the child socket directly into the accept queue */
1439 inet_csk_reqsk_queue_add(sk, req, child);
1440
1441 /* Now finish processing the fastopen child socket. */
1442 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1443 tcp_init_congestion_control(child);
1444 tcp_mtup_init(child);
1445 tcp_init_buffer_space(child);
1446 tcp_init_metrics(child);
1447
1448 /* Queue the data carried in the SYN packet. We need to first
1449 * bump skb's refcnt because the caller will attempt to free it.
1450 *
1451 * XXX (TFO) - we honor a zero-payload TFO request for now.
1452 * (Any reason not to?)
1453 */
1454 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1455 /* Don't queue the skb if there is no payload in SYN.
1456 * XXX (TFO) - How about SYN+FIN?
1457 */
1458 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1459 } else {
1460 skb = skb_get(skb);
1461 skb_dst_drop(skb);
1462 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1463 skb_set_owner_r(skb, child);
1464 __skb_queue_tail(&child->sk_receive_queue, skb);
1465 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1466 tp->syn_data_acked = 1;
1467 }
1468 sk->sk_data_ready(sk, 0);
1469 bh_unlock_sock(child);
1470 sock_put(child);
1471 WARN_ON(req->sk == NULL);
1472 return 0;
1473 }
1474
1475 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1476 {
1477 struct tcp_options_received tmp_opt;
1478 struct request_sock *req;
1479 struct inet_request_sock *ireq;
1480 struct tcp_sock *tp = tcp_sk(sk);
1481 struct dst_entry *dst = NULL;
1482 __be32 saddr = ip_hdr(skb)->saddr;
1483 __be32 daddr = ip_hdr(skb)->daddr;
1484 __u32 isn = TCP_SKB_CB(skb)->when;
1485 bool want_cookie = false;
1486 struct flowi4 fl4;
1487 struct tcp_fastopen_cookie foc = { .len = -1 };
1488 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1489 struct sk_buff *skb_synack;
1490 int do_fastopen;
1491
1492 /* Never answer SYNs sent to broadcast or multicast */
1493 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1494 goto drop;
1495
1496 /* TW buckets are converted to open requests without
1497 * limitation; they conserve resources and the peer is
1498 * evidently a real one.
1499 */
1500 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1501 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1502 if (!want_cookie)
1503 goto drop;
1504 }
1505
1506 /* Accept backlog is full. If we have already queued enough
1507 * warm entries in the syn queue, drop the request. That is better than
1508 * clogging the syn queue with openreqs whose timeouts increase
1509 * exponentially.
1510 */
1511 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1512 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1513 goto drop;
1514 }
1515
1516 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1517 if (!req)
1518 goto drop;
1519
1520 #ifdef CONFIG_TCP_MD5SIG
1521 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1522 #endif
1523
1524 tcp_clear_options(&tmp_opt);
1525 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1526 tmp_opt.user_mss = tp->rx_opt.user_mss;
1527 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1528
1529 if (want_cookie && !tmp_opt.saw_tstamp)
1530 tcp_clear_options(&tmp_opt);
1531
1532 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1533 tcp_openreq_init(req, &tmp_opt, skb);
1534
1535 ireq = inet_rsk(req);
1536 ireq->loc_addr = daddr;
1537 ireq->rmt_addr = saddr;
1538 ireq->no_srccheck = inet_sk(sk)->transparent;
1539 ireq->opt = tcp_v4_save_options(skb);
1540 ireq->ir_mark = inet_request_mark(sk, skb);
1541
1542 if (security_inet_conn_request(sk, skb, req))
1543 goto drop_and_free;
1544
1545 if (!want_cookie || tmp_opt.tstamp_ok)
1546 TCP_ECN_create_request(req, skb, sock_net(sk));
1547
1548 if (want_cookie) {
1549 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1550 req->cookie_ts = tmp_opt.tstamp_ok;
1551 } else if (!isn) {
1552 /* VJ's idea. We save last timestamp seen
1553 * from the destination in peer table, when entering
1554 * state TIME-WAIT, and check against it before
1555 * accepting new connection request.
1556 *
1557 * If "isn" is not zero, this request hit alive
1558 * timewait bucket, so that all the necessary checks
1559 * are made in the function processing timewait state.
1560 */
1561 if (tmp_opt.saw_tstamp &&
1562 tcp_death_row.sysctl_tw_recycle &&
1563 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1564 fl4.daddr == saddr) {
1565 if (!tcp_peer_is_proven(req, dst, true)) {
1566 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1567 goto drop_and_release;
1568 }
1569 }
1570 /* Kill the following clause, if you dislike this way. */
1571 else if (!sysctl_tcp_syncookies &&
1572 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1573 (sysctl_max_syn_backlog >> 2)) &&
1574 !tcp_peer_is_proven(req, dst, false)) {
1575 /* Without syncookies the last quarter of the
1576 * backlog is filled with destinations
1577 * proven to be alive.
1578 * It means that we continue to communicate
1579 * with destinations already remembered
1580 * at the moment of the synflood.
1581 */
1582 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1583 &saddr, ntohs(tcp_hdr(skb)->source));
1584 goto drop_and_release;
1585 }
1586
1587 isn = tcp_v4_init_sequence(skb);
1588 }
1589 tcp_rsk(req)->snt_isn = isn;
1590
1591 if (dst == NULL) {
1592 dst = inet_csk_route_req(sk, &fl4, req);
1593 if (dst == NULL)
1594 goto drop_and_free;
1595 }
1596 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1597
1598 /* We don't call tcp_v4_send_synack() directly because we need
1599 * to make sure a child socket can be created successfully before
1600 * sending back synack!
1601 *
1602 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1603 * (or better yet, call tcp_send_synack() in the child context
1604 * directly, but will have to fix bunch of other code first)
1605 * after syn_recv_sock() except one will need to first fix the
1606 * latter to remove its dependency on the current implementation
1607 * of tcp_v4_send_synack()->tcp_select_initial_window().
1608 */
1609 skb_synack = tcp_make_synack(sk, dst, req,
1610 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1611
1612 if (skb_synack) {
1613 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1614 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1615 } else
1616 goto drop_and_free;
1617
1618 if (likely(!do_fastopen)) {
1619 int err;
1620 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1621 ireq->rmt_addr, ireq->opt);
1622 err = net_xmit_eval(err);
1623 if (err || want_cookie)
1624 goto drop_and_free;
1625
1626 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1627 tcp_rsk(req)->listener = NULL;
1628 /* Add the request_sock to the SYN table */
1629 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1630 if (fastopen_cookie_present(&foc) && foc.len != 0)
1631 NET_INC_STATS_BH(sock_net(sk),
1632 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1633 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1634 goto drop_and_free;
1635
1636 return 0;
1637
1638 drop_and_release:
1639 dst_release(dst);
1640 drop_and_free:
1641 reqsk_free(req);
1642 drop:
1643 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1644 return 0;
1645 }
1646 EXPORT_SYMBOL(tcp_v4_conn_request);
1647
1648
1649 /*
1650 * The three-way handshake has completed - we got a valid ACK -
1651 * now create the new socket.
1652 */
1653 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1654 struct request_sock *req,
1655 struct dst_entry *dst)
1656 {
1657 struct inet_request_sock *ireq;
1658 struct inet_sock *newinet;
1659 struct tcp_sock *newtp;
1660 struct sock *newsk;
1661 #ifdef CONFIG_TCP_MD5SIG
1662 struct tcp_md5sig_key *key;
1663 #endif
1664 struct ip_options_rcu *inet_opt;
1665
1666 if (sk_acceptq_is_full(sk))
1667 goto exit_overflow;
1668
1669 newsk = tcp_create_openreq_child(sk, req, skb);
1670 if (!newsk)
1671 goto exit_nonewsk;
1672
1673 newsk->sk_gso_type = SKB_GSO_TCPV4;
1674 inet_sk_rx_dst_set(newsk, skb);
1675
1676 newtp = tcp_sk(newsk);
1677 newinet = inet_sk(newsk);
1678 ireq = inet_rsk(req);
1679 newinet->inet_daddr = ireq->rmt_addr;
1680 newinet->inet_rcv_saddr = ireq->loc_addr;
1681 newinet->inet_saddr = ireq->loc_addr;
1682 inet_opt = ireq->opt;
1683 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1684 ireq->opt = NULL;
1685 newinet->mc_index = inet_iif(skb);
1686 newinet->mc_ttl = ip_hdr(skb)->ttl;
1687 newinet->rcv_tos = ip_hdr(skb)->tos;
1688 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1689 if (inet_opt)
1690 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1691 newinet->inet_id = newtp->write_seq ^ jiffies;
1692
1693 if (!dst) {
1694 dst = inet_csk_route_child_sock(sk, newsk, req);
1695 if (!dst)
1696 goto put_and_exit;
1697 } else {
1698 /* syncookie case : see end of cookie_v4_check() */
1699 }
1700 sk_setup_caps(newsk, dst);
1701
1702 tcp_mtup_init(newsk);
1703 tcp_sync_mss(newsk, dst_mtu(dst));
1704 newtp->advmss = dst_metric_advmss(dst);
1705 if (tcp_sk(sk)->rx_opt.user_mss &&
1706 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1707 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1708
1709 tcp_initialize_rcv_mss(newsk);
1710 tcp_synack_rtt_meas(newsk, req);
1711 newtp->total_retrans = req->num_retrans;
1712
1713 #ifdef CONFIG_TCP_MD5SIG
1714 /* Copy over the MD5 key from the original socket */
1715 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1716 AF_INET);
1717 if (key != NULL) {
1718 /*
1719 * We're using one, so create a matching key
1720 * on the newsk structure. If we fail to get
1721 * memory, then we end up not copying the key
1722 * across. Shucks.
1723 */
1724 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1725 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1726 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1727 }
1728 #endif
1729
1730 if (__inet_inherit_port(sk, newsk) < 0)
1731 goto put_and_exit;
1732 __inet_hash_nolisten(newsk, NULL);
1733
1734 return newsk;
1735
1736 exit_overflow:
1737 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1738 exit_nonewsk:
1739 dst_release(dst);
1740 exit:
1741 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1742 return NULL;
1743 put_and_exit:
1744 inet_csk_prepare_forced_close(newsk);
1745 tcp_done(newsk);
1746 goto exit;
1747 }
1748 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
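
/*
 * From user space, the socket created here is what accept() on the listener
 * eventually returns (sketch, standard sockets API assumed):
 *
 *	struct sockaddr_in peer;
 *	socklen_t len = sizeof(peer);
 *	int conn = accept(listen_fd, (struct sockaddr *)&peer, &len);
 */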
1749
1750 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1751 {
1752 struct tcphdr *th = tcp_hdr(skb);
1753 const struct iphdr *iph = ip_hdr(skb);
1754 struct sock *nsk;
1755 struct request_sock **prev;
1756 /* Find possible connection requests. */
1757 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1758 iph->saddr, iph->daddr);
1759 if (req)
1760 return tcp_check_req(sk, skb, req, prev, false);
1761
1762 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1763 th->source, iph->daddr, th->dest, inet_iif(skb));
1764
1765 if (nsk) {
1766 if (nsk->sk_state != TCP_TIME_WAIT) {
1767 bh_lock_sock(nsk);
1768 return nsk;
1769 }
1770 inet_twsk_put(inet_twsk(nsk));
1771 return NULL;
1772 }
1773
1774 #ifdef CONFIG_SYN_COOKIES
1775 if (!th->syn)
1776 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1777 #endif
1778 return sk;
1779 }
1780
1781 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1782 {
1783 const struct iphdr *iph = ip_hdr(skb);
1784
1785 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1786 if (!tcp_v4_check(skb->len, iph->saddr,
1787 iph->daddr, skb->csum)) {
1788 skb->ip_summed = CHECKSUM_UNNECESSARY;
1789 return 0;
1790 }
1791 }
1792
1793 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1794 skb->len, IPPROTO_TCP, 0);
1795
1796 if (skb->len <= 76) {
1797 return __skb_checksum_complete(skb);
1798 }
1799 return 0;
1800 }
1801
1802
1803 /* The socket must have its spinlock held when we get
1804 * here.
1805 *
1806 * We have a potential double-lock case here, so even when
1807 * doing backlog processing we use the BH locking scheme.
1808 * This is because we cannot sleep with the original spinlock
1809 * held.
1810 */
1811 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1812 {
1813 struct sock *rsk;
1814 #ifdef CONFIG_TCP_MD5SIG
1815 /*
1816 * We really want to reject the packet as early as possible
1817 * if:
1818 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1819 * o There is an MD5 option and we're not expecting one
1820 */
1821 if (tcp_v4_inbound_md5_hash(sk, skb))
1822 goto discard;
1823 #endif
1824
1825 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1826 struct dst_entry *dst = sk->sk_rx_dst;
1827
1828 sock_rps_save_rxhash(sk, skb);
1829 if (dst) {
1830 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1831 dst->ops->check(dst, 0) == NULL) {
1832 dst_release(dst);
1833 sk->sk_rx_dst = NULL;
1834 }
1835 }
1836 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1837 rsk = sk;
1838 goto reset;
1839 }
1840 return 0;
1841 }
1842
1843 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1844 goto csum_err;
1845
1846 if (sk->sk_state == TCP_LISTEN) {
1847 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1848 if (!nsk)
1849 goto discard;
1850
1851 if (nsk != sk) {
1852 sock_rps_save_rxhash(nsk, skb);
1853 if (tcp_child_process(sk, nsk, skb)) {
1854 rsk = nsk;
1855 goto reset;
1856 }
1857 return 0;
1858 }
1859 } else
1860 sock_rps_save_rxhash(sk, skb);
1861
1862 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1863 rsk = sk;
1864 goto reset;
1865 }
1866 return 0;
1867
1868 reset:
1869 tcp_v4_send_reset(rsk, skb);
1870 discard:
1871 kfree_skb(skb);
1872 /* Be careful here. If this function gets more complicated and
1873 * gcc suffers from register pressure on the x86, sk (in %ebx)
1874 * might be destroyed here. This current version compiles correctly,
1875 * but you have been warned.
1876 */
1877 return 0;
1878
1879 csum_err:
1880 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1881 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1882 goto discard;
1883 }
1884 EXPORT_SYMBOL(tcp_v4_do_rcv);
1885
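/* Early demux: called from the IP input path before routing.  If the 4-tuple
 * matches an established socket, attach that socket (and, when still valid,
 * its cached input route) to the skb so the later lookup and route decision
 * can be skipped.
 */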
1886 void tcp_v4_early_demux(struct sk_buff *skb)
1887 {
1888 const struct iphdr *iph;
1889 const struct tcphdr *th;
1890 struct sock *sk;
1891
1892 if (skb->pkt_type != PACKET_HOST)
1893 return;
1894
1895 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1896 return;
1897
1898 iph = ip_hdr(skb);
1899 th = tcp_hdr(skb);
1900
1901 if (th->doff < sizeof(struct tcphdr) / 4)
1902 return;
1903
1904 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1905 iph->saddr, th->source,
1906 iph->daddr, ntohs(th->dest),
1907 skb->skb_iif);
1908 if (sk) {
1909 skb->sk = sk;
1910 skb->destructor = sock_edemux;
1911 if (sk->sk_state != TCP_TIME_WAIT) {
1912 struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
1913
1914 if (dst)
1915 dst = dst_check(dst, 0);
1916 if (dst &&
1917 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1918 skb_dst_set_noref(skb, dst);
1919 }
1920 }
1921 }
1922
1923 /* Packet is added to VJ-style prequeue for processing in process
1924 * context, if a reader task is waiting. Apparently, this exciting
1925 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1926 * failed somewhere. Latency? Burstiness? Well, at least now we will
1927 * see, why it failed. 8)8) --ANK
1928 *
1929 */
1930 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1931 {
1932 struct tcp_sock *tp = tcp_sk(sk);
1933
1934 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1935 return false;
1936
1937 if (skb->len <= tcp_hdrlen(skb) &&
1938 skb_queue_len(&tp->ucopy.prequeue) == 0)
1939 return false;
1940
1941 skb_dst_force(skb);
1942 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1943 tp->ucopy.memory += skb->truesize;
1944 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1945 struct sk_buff *skb1;
1946
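		/* The prequeue has outgrown the receive buffer: drain every
		 * queued segment through the regular receive path and count
		 * each one as a prequeue drop.
		 */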
1947 BUG_ON(sock_owned_by_user(sk));
1948
1949 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1950 sk_backlog_rcv(sk, skb1);
1951 NET_INC_STATS_BH(sock_net(sk),
1952 LINUX_MIB_TCPPREQUEUEDROPPED);
1953 }
1954
1955 tp->ucopy.memory = 0;
1956 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1957 wake_up_interruptible_sync_poll(sk_sleep(sk),
1958 POLLIN | POLLRDNORM | POLLRDBAND);
1959 if (!inet_csk_ack_scheduled(sk))
1960 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1961 (3 * tcp_rto_min(sk)) / 4,
1962 sysctl_tcp_rto_max);
1963 }
1964 return true;
1965 }
1966 EXPORT_SYMBOL(tcp_prequeue);
1967
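/* Run the socket filter, allowing it to trim the skb no further than the TCP
 * header.  If payload was trimmed, shrink end_seq accordingly so the sequence
 * space still matches what remains in the skb.
 */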
1968 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1969 {
1970 struct tcphdr *th = (struct tcphdr *)skb->data;
1971 unsigned int eaten = skb->len;
1972 int err;
1973
1974 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1975 if (!err) {
1976 eaten -= skb->len;
1977 TCP_SKB_CB(skb)->end_seq -= eaten;
1978 }
1979 return err;
1980 }
1981 EXPORT_SYMBOL(tcp_filter);
1982
1983 /*
1984 * From tcp_input.c
1985 */
1986
1987 int tcp_v4_rcv(struct sk_buff *skb)
1988 {
1989 const struct iphdr *iph;
1990 const struct tcphdr *th;
1991 struct sock *sk;
1992 int ret;
1993 struct net *net = dev_net(skb->dev);
1994
1995 if (skb->pkt_type != PACKET_HOST)
1996 goto discard_it;
1997
1998 /* Count it even if it's bad */
1999 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
2000
2001 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
2002 goto discard_it;
2003
2004 th = tcp_hdr(skb);
2005
2006 if (th->doff < sizeof(struct tcphdr) / 4)
2007 goto bad_packet;
2008 if (!pskb_may_pull(skb, th->doff * 4))
2009 goto discard_it;
2010
2011 /* An explanation is required here, I think.
2012 * Packet length and doff are validated by header prediction,
2013 	 * provided the case of th->doff == 0 is eliminated.
2014 * So, we defer the checks. */
2015 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
2016 goto csum_error;
2017
2018 th = tcp_hdr(skb);
2019 iph = ip_hdr(skb);
2020 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
2021 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
2022 skb->len - th->doff * 4);
2023 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
2024 TCP_SKB_CB(skb)->when = 0;
2025 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2026 TCP_SKB_CB(skb)->sacked = 0;
2027
2028 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
2029 if (!sk)
2030 goto no_tcp_socket;
2031
2032 process:
2033 if (sk->sk_state == TCP_TIME_WAIT)
2034 goto do_time_wait;
2035
2036 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2037 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
2038 goto discard_and_relse;
2039 }
2040
2041 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2042 goto discard_and_relse;
2043 nf_reset(skb);
2044
2045 if (tcp_filter(sk, skb))
2046 goto discard_and_relse;
2047 th = (const struct tcphdr *)skb->data;
2048 iph = ip_hdr(skb);
2049
2050 skb->dev = NULL;
2051
2052 bh_lock_sock_nested(sk);
2053 ret = 0;
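	/* If no user context owns the socket, process the segment now
	 * (directly or via the prequeue); otherwise queue it on the backlog,
	 * bounded by rcvbuf + sndbuf, to be run when the socket is released.
	 */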
2054 if (!sock_owned_by_user(sk)) {
2055 #ifdef CONFIG_NET_DMA
2056 struct tcp_sock *tp = tcp_sk(sk);
2057 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2058 tp->ucopy.dma_chan = net_dma_find_channel();
2059 if (tp->ucopy.dma_chan)
2060 ret = tcp_v4_do_rcv(sk, skb);
2061 else
2062 #endif
2063 {
2064 if (!tcp_prequeue(sk, skb))
2065 ret = tcp_v4_do_rcv(sk, skb);
2066 }
2067 } else if (unlikely(sk_add_backlog(sk, skb,
2068 sk->sk_rcvbuf + sk->sk_sndbuf))) {
2069 bh_unlock_sock(sk);
2070 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2071 goto discard_and_relse;
2072 }
2073 bh_unlock_sock(sk);
2074
2075 sock_put(sk);
2076
2077 return ret;
2078
2079 no_tcp_socket:
2080 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2081 goto discard_it;
2082
2083 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2084 csum_error:
2085 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
2086 bad_packet:
2087 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2088 } else {
2089 tcp_v4_send_reset(NULL, skb);
2090 }
2091
2092 discard_it:
2093 /* Discard frame. */
2094 kfree_skb(skb);
2095 return 0;
2096
2097 discard_and_relse:
2098 sock_put(sk);
2099 goto discard_it;
2100
2101 do_time_wait:
2102 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2103 inet_twsk_put(inet_twsk(sk));
2104 goto discard_it;
2105 }
2106
2107 if (skb->len < (th->doff << 2)) {
2108 inet_twsk_put(inet_twsk(sk));
2109 goto bad_packet;
2110 }
2111 if (tcp_checksum_complete(skb)) {
2112 inet_twsk_put(inet_twsk(sk));
2113 goto csum_error;
2114 }
2115 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2116 case TCP_TW_SYN: {
2117 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2118 &tcp_hashinfo,
2119 iph->saddr, th->source,
2120 iph->daddr, th->dest,
2121 inet_iif(skb));
2122 if (sk2) {
2123 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2124 inet_twsk_put(inet_twsk(sk));
2125 sk = sk2;
2126 goto process;
2127 }
2128 /* Fall through to ACK */
2129 }
2130 case TCP_TW_ACK:
2131 tcp_v4_timewait_ack(sk, skb);
2132 break;
2133 case TCP_TW_RST:
2134 goto no_tcp_socket;
2135 case TCP_TW_SUCCESS:;
2136 }
2137 goto discard_it;
2138 }
2139
2140 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2141 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2142 .twsk_unique = tcp_twsk_unique,
2143 .twsk_destructor= tcp_twsk_destructor,
2144 };
2145
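/* Remember the input route and ingress interface on the socket so that the
 * established fast path (tcp_v4_do_rcv) and early demux can reuse them.
 */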
2146 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2147 {
2148 struct dst_entry *dst = skb_dst(skb);
2149
2150 dst_hold(dst);
2151 sk->sk_rx_dst = dst;
2152 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2153 }
2154 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2155
2156 const struct inet_connection_sock_af_ops ipv4_specific = {
2157 .queue_xmit = ip_queue_xmit,
2158 .send_check = tcp_v4_send_check,
2159 .rebuild_header = inet_sk_rebuild_header,
2160 .sk_rx_dst_set = inet_sk_rx_dst_set,
2161 .conn_request = tcp_v4_conn_request,
2162 .syn_recv_sock = tcp_v4_syn_recv_sock,
2163 .net_header_len = sizeof(struct iphdr),
2164 .setsockopt = ip_setsockopt,
2165 .getsockopt = ip_getsockopt,
2166 .addr2sockaddr = inet_csk_addr2sockaddr,
2167 .sockaddr_len = sizeof(struct sockaddr_in),
2168 .bind_conflict = inet_csk_bind_conflict,
2169 #ifdef CONFIG_COMPAT
2170 .compat_setsockopt = compat_ip_setsockopt,
2171 .compat_getsockopt = compat_ip_getsockopt,
2172 #endif
2173 .mtu_reduced = tcp_v4_mtu_reduced,
2174 };
2175 EXPORT_SYMBOL(ipv4_specific);
2176
2177 #ifdef CONFIG_TCP_MD5SIG
2178 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2179 .md5_lookup = tcp_v4_md5_lookup,
2180 .calc_md5_hash = tcp_v4_md5_hash_skb,
2181 .md5_parse = tcp_v4_parse_md5_keys,
2182 };
2183 #endif
2184
2185 /* NOTE: A lot of things are set to zero explicitly by the call to
2186  * sk_alloc(), so they need not be done here.
2187 */
2188 static int tcp_v4_init_sock(struct sock *sk)
2189 {
2190 struct inet_connection_sock *icsk = inet_csk(sk);
2191
2192 tcp_init_sock(sk);
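	/* icsk_MMSRB appears to be a vendor (MTK) flag: cleared here and set
	 * by tcp_v4_handle_retrans_time_by_uid() below when a socket's
	 * retransmit timer is forced to fire early.
	 */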
2193 icsk->icsk_MMSRB = 0;
2194
2195 icsk->icsk_af_ops = &ipv4_specific;
2196
2197 #ifdef CONFIG_TCP_MD5SIG
2198 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2199 #endif
2200
2201 return 0;
2202 }
2203
2204 void tcp_v4_destroy_sock(struct sock *sk)
2205 {
2206 struct tcp_sock *tp = tcp_sk(sk);
2207
2208 tcp_clear_xmit_timers(sk);
2209
2210 tcp_cleanup_congestion_control(sk);
2211
2212 	/* Clean up the write buffer. */
2213 tcp_write_queue_purge(sk);
2214
2215 /* Cleans up our, hopefully empty, out_of_order_queue. */
2216 __skb_queue_purge(&tp->out_of_order_queue);
2217
2218 #ifdef CONFIG_TCP_MD5SIG
2219 /* Clean up the MD5 key list, if any */
2220 if (tp->md5sig_info) {
2221 tcp_clear_md5_list(sk);
2222 kfree_rcu(tp->md5sig_info, rcu);
2223 tp->md5sig_info = NULL;
2224 }
2225 #endif
2226
2227 #ifdef CONFIG_NET_DMA
2228 /* Cleans up our sk_async_wait_queue */
2229 __skb_queue_purge(&sk->sk_async_wait_queue);
2230 #endif
2231
2232 	/* Clean up the prequeue; it should already be empty. */
2233 __skb_queue_purge(&tp->ucopy.prequeue);
2234
2235 /* Clean up a referenced TCP bind bucket. */
2236 if (inet_csk(sk)->icsk_bind_hash)
2237 inet_put_port(sk);
2238
2239 BUG_ON(tp->fastopen_rsk != NULL);
2240
2241 /* If socket is aborted during connect operation */
2242 tcp_free_fastopen_req(tp);
2243
2244 sk_sockets_allocated_dec(sk);
2245 sock_release_memcg(sk);
2246 }
2247 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2248
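/* Vendor (MTK) hook: walk the established hash and, for every socket owned by
 * the given uid, rearm the retransmit timer to fire almost immediately, bump
 * icsk_rto and mark the socket with icsk_MMSRB.
 */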
2249 void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
2250 {
2251 unsigned int bucket;
2252 uid_t skuid = (uid_t)(uid_e.appuid);
2253 	struct inet_connection_sock *icsk = NULL;
2254
2255
2256 for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
2257 struct hlist_nulls_node *node;
2258 struct sock *sk;
2259 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
2260
2261 spin_lock_bh(lock);
2262 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
2263
2264 if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
2265 continue;
2266 if (sock_flag(sk, SOCK_DEAD))
2267 continue;
2268
2269 			if (sk->sk_socket) {
2270 				if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
2271 					continue;
2272 				else
2273 					printk(KERN_INFO "[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!\n",
2274 					       SOCK_INODE(sk->sk_socket)->i_uid);
2275 			} else {
2276 				continue;
2277 			}
2278
2279 sock_hold(sk);
2280 spin_unlock_bh(lock);
2281
2282 local_bh_disable();
2283 bh_lock_sock(sk);
2284
2285 			/* update the socket's retransmit timeout */
2286 icsk = inet_csk(sk);
2287 printk("[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");
2288
2289 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
2290 icsk->icsk_rto = sysctl_tcp_rto_min * 30;
2291 icsk->icsk_MMSRB = 1;
2292
2293 bh_unlock_sock(sk);
2294 local_bh_enable();
2295 spin_lock_bh(lock);
2296 sock_put(sk);
2297
2298 }
2299 spin_unlock_bh(lock);
2300 }
2301
2302 }
2303
2304
2305 /*
2306  * tcp_v4_reset_connections_by_uid - destroy all sockets owned by a specific uid
2307  */
2308 void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
2309 {
2310 unsigned int bucket;
2311 uid_t skuid = (uid_t)(uid_e.appuid);
2312
2313 for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
2314 struct hlist_nulls_node *node;
2315 struct sock *sk;
2316 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
2317
2318 restart:
2319 spin_lock_bh(lock);
2320 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
2321
2322 if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
2323 continue;
2324 if (sock_flag(sk, SOCK_DEAD))
2325 continue;
2326
2327 			if (sk->sk_socket) {
2328 				if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
2329 					continue;
2330 				else
2331 					printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!\n",
2332 					       SOCK_INODE(sk->sk_socket)->i_uid);
2333 			} else {
2334 				continue;
2335 			}
2336
2337 sock_hold(sk);
2338 spin_unlock_bh(lock);
2339
2340 local_bh_disable();
2341 bh_lock_sock(sk);
2342 sk->sk_err = uid_e.errNum;
2343 			printk(KERN_INFO "SIOCKILLSOCK set sk_err = %d\n", sk->sk_err);
2344 sk->sk_error_report(sk);
2345
2346 tcp_done(sk);
2347 bh_unlock_sock(sk);
2348 local_bh_enable();
2349 sock_put(sk);
2350
2351 goto restart;
2352 }
2353 spin_unlock_bh(lock);
2354 }
2355 }
2356
2357
2358 #ifdef CONFIG_PROC_FS
2359 /* Proc filesystem TCP sock list dumping. */
2360
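/* The iterator walks the listening hash first, then the established hash; in
 * this kernel TIME_WAIT sockets live on a separate twchain per established
 * bucket, hence the extra TCP_SEQ_STATE_TIME_WAIT state.
 */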
2361 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2362 {
2363 return hlist_nulls_empty(head) ? NULL :
2364 list_entry(head->first, struct inet_timewait_sock, tw_node);
2365 }
2366
2367 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2368 {
2369 return !is_a_nulls(tw->tw_node.next) ?
2370 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2371 }
2372
2373 /*
2374  * Get the next listener socket following cur. If cur is NULL, get the first socket
2375 * starting from bucket given in st->bucket; when st->bucket is zero the
2376 * very first socket in the hash table is returned.
2377 */
2378 static void *listening_get_next(struct seq_file *seq, void *cur)
2379 {
2380 struct inet_connection_sock *icsk;
2381 struct hlist_nulls_node *node;
2382 struct sock *sk = cur;
2383 struct inet_listen_hashbucket *ilb;
2384 struct tcp_iter_state *st = seq->private;
2385 struct net *net = seq_file_net(seq);
2386
2387 if (!sk) {
2388 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2389 spin_lock_bh(&ilb->lock);
2390 sk = sk_nulls_head(&ilb->head);
2391 st->offset = 0;
2392 goto get_sk;
2393 }
2394 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2395 ++st->num;
2396 ++st->offset;
2397
2398 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2399 struct request_sock *req = cur;
2400
2401 icsk = inet_csk(st->syn_wait_sk);
2402 req = req->dl_next;
2403 while (1) {
2404 while (req) {
2405 if (req->rsk_ops->family == st->family) {
2406 cur = req;
2407 goto out;
2408 }
2409 req = req->dl_next;
2410 }
2411 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2412 break;
2413 get_req:
2414 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2415 }
2416 sk = sk_nulls_next(st->syn_wait_sk);
2417 st->state = TCP_SEQ_STATE_LISTENING;
2418 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2419 } else {
2420 icsk = inet_csk(sk);
2421 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2422 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2423 goto start_req;
2424 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2425 sk = sk_nulls_next(sk);
2426 }
2427 get_sk:
2428 sk_nulls_for_each_from(sk, node) {
2429 if (!net_eq(sock_net(sk), net))
2430 continue;
2431 if (sk->sk_family == st->family) {
2432 cur = sk;
2433 goto out;
2434 }
2435 icsk = inet_csk(sk);
2436 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2437 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2438 start_req:
2439 st->uid = sock_i_uid(sk);
2440 st->syn_wait_sk = sk;
2441 st->state = TCP_SEQ_STATE_OPENREQ;
2442 st->sbucket = 0;
2443 goto get_req;
2444 }
2445 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2446 }
2447 spin_unlock_bh(&ilb->lock);
2448 st->offset = 0;
2449 if (++st->bucket < INET_LHTABLE_SIZE) {
2450 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2451 spin_lock_bh(&ilb->lock);
2452 sk = sk_nulls_head(&ilb->head);
2453 goto get_sk;
2454 }
2455 cur = NULL;
2456 out:
2457 return cur;
2458 }
2459
2460 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2461 {
2462 struct tcp_iter_state *st = seq->private;
2463 void *rc;
2464
2465 st->bucket = 0;
2466 st->offset = 0;
2467 rc = listening_get_next(seq, NULL);
2468
2469 while (rc && *pos) {
2470 rc = listening_get_next(seq, rc);
2471 --*pos;
2472 }
2473 return rc;
2474 }
2475
2476 static inline bool empty_bucket(struct tcp_iter_state *st)
2477 {
2478 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2479 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2480 }
2481
2482 /*
2483 * Get first established socket starting from bucket given in st->bucket.
2484 * If st->bucket is zero, the very first socket in the hash is returned.
2485 */
2486 static void *established_get_first(struct seq_file *seq)
2487 {
2488 struct tcp_iter_state *st = seq->private;
2489 struct net *net = seq_file_net(seq);
2490 void *rc = NULL;
2491
2492 st->offset = 0;
2493 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2494 struct sock *sk;
2495 struct hlist_nulls_node *node;
2496 struct inet_timewait_sock *tw;
2497 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2498
2499 /* Lockless fast path for the common case of empty buckets */
2500 if (empty_bucket(st))
2501 continue;
2502
2503 spin_lock_bh(lock);
2504 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2505 if (sk->sk_family != st->family ||
2506 !net_eq(sock_net(sk), net)) {
2507 continue;
2508 }
2509 rc = sk;
2510 goto out;
2511 }
2512 st->state = TCP_SEQ_STATE_TIME_WAIT;
2513 inet_twsk_for_each(tw, node,
2514 &tcp_hashinfo.ehash[st->bucket].twchain) {
2515 if (tw->tw_family != st->family ||
2516 !net_eq(twsk_net(tw), net)) {
2517 continue;
2518 }
2519 rc = tw;
2520 goto out;
2521 }
2522 spin_unlock_bh(lock);
2523 st->state = TCP_SEQ_STATE_ESTABLISHED;
2524 }
2525 out:
2526 return rc;
2527 }
2528
2529 static void *established_get_next(struct seq_file *seq, void *cur)
2530 {
2531 struct sock *sk = cur;
2532 struct inet_timewait_sock *tw;
2533 struct hlist_nulls_node *node;
2534 struct tcp_iter_state *st = seq->private;
2535 struct net *net = seq_file_net(seq);
2536
2537 ++st->num;
2538 ++st->offset;
2539
2540 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2541 tw = cur;
2542 tw = tw_next(tw);
2543 get_tw:
2544 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2545 tw = tw_next(tw);
2546 }
2547 if (tw) {
2548 cur = tw;
2549 goto out;
2550 }
2551 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2552 st->state = TCP_SEQ_STATE_ESTABLISHED;
2553
2554 		/* Look for the next non-empty bucket */
2555 st->offset = 0;
2556 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2557 empty_bucket(st))
2558 ;
2559 if (st->bucket > tcp_hashinfo.ehash_mask)
2560 return NULL;
2561
2562 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2563 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2564 } else
2565 sk = sk_nulls_next(sk);
2566
2567 sk_nulls_for_each_from(sk, node) {
2568 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2569 goto found;
2570 }
2571
2572 st->state = TCP_SEQ_STATE_TIME_WAIT;
2573 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2574 goto get_tw;
2575 found:
2576 cur = sk;
2577 out:
2578 return cur;
2579 }
2580
2581 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2582 {
2583 struct tcp_iter_state *st = seq->private;
2584 void *rc;
2585
2586 st->bucket = 0;
2587 rc = established_get_first(seq);
2588
2589 while (rc && pos) {
2590 rc = established_get_next(seq, rc);
2591 --pos;
2592 }
2593 return rc;
2594 }
2595
2596 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2597 {
2598 void *rc;
2599 struct tcp_iter_state *st = seq->private;
2600
2601 st->state = TCP_SEQ_STATE_LISTENING;
2602 rc = listening_get_idx(seq, &pos);
2603
2604 if (!rc) {
2605 st->state = TCP_SEQ_STATE_ESTABLISHED;
2606 rc = established_get_idx(seq, pos);
2607 }
2608
2609 return rc;
2610 }
2611
2612 static void *tcp_seek_last_pos(struct seq_file *seq)
2613 {
2614 struct tcp_iter_state *st = seq->private;
2615 int offset = st->offset;
2616 int orig_num = st->num;
2617 void *rc = NULL;
2618
2619 switch (st->state) {
2620 case TCP_SEQ_STATE_OPENREQ:
2621 case TCP_SEQ_STATE_LISTENING:
2622 if (st->bucket >= INET_LHTABLE_SIZE)
2623 break;
2624 st->state = TCP_SEQ_STATE_LISTENING;
2625 rc = listening_get_next(seq, NULL);
2626 while (offset-- && rc)
2627 rc = listening_get_next(seq, rc);
2628 if (rc)
2629 break;
2630 st->bucket = 0;
2631 /* Fallthrough */
2632 case TCP_SEQ_STATE_ESTABLISHED:
2633 case TCP_SEQ_STATE_TIME_WAIT:
2634 st->state = TCP_SEQ_STATE_ESTABLISHED;
2635 if (st->bucket > tcp_hashinfo.ehash_mask)
2636 break;
2637 rc = established_get_first(seq);
2638 while (offset-- && rc)
2639 rc = established_get_next(seq, rc);
2640 }
2641
2642 st->num = orig_num;
2643
2644 return rc;
2645 }
2646
2647 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2648 {
2649 struct tcp_iter_state *st = seq->private;
2650 void *rc;
2651
2652 if (*pos && *pos == st->last_pos) {
2653 rc = tcp_seek_last_pos(seq);
2654 if (rc)
2655 goto out;
2656 }
2657
2658 st->state = TCP_SEQ_STATE_LISTENING;
2659 st->num = 0;
2660 st->bucket = 0;
2661 st->offset = 0;
2662 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2663
2664 out:
2665 st->last_pos = *pos;
2666 return rc;
2667 }
2668
2669 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2670 {
2671 struct tcp_iter_state *st = seq->private;
2672 void *rc = NULL;
2673
2674 if (v == SEQ_START_TOKEN) {
2675 rc = tcp_get_idx(seq, 0);
2676 goto out;
2677 }
2678
2679 switch (st->state) {
2680 case TCP_SEQ_STATE_OPENREQ:
2681 case TCP_SEQ_STATE_LISTENING:
2682 rc = listening_get_next(seq, v);
2683 if (!rc) {
2684 st->state = TCP_SEQ_STATE_ESTABLISHED;
2685 st->bucket = 0;
2686 st->offset = 0;
2687 rc = established_get_first(seq);
2688 }
2689 break;
2690 case TCP_SEQ_STATE_ESTABLISHED:
2691 case TCP_SEQ_STATE_TIME_WAIT:
2692 rc = established_get_next(seq, v);
2693 break;
2694 }
2695 out:
2696 ++*pos;
2697 st->last_pos = *pos;
2698 return rc;
2699 }
2700
2701 static void tcp_seq_stop(struct seq_file *seq, void *v)
2702 {
2703 struct tcp_iter_state *st = seq->private;
2704
2705 switch (st->state) {
2706 case TCP_SEQ_STATE_OPENREQ:
2707 if (v) {
2708 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2709 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2710 }
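		/* Fallthrough */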
2711 case TCP_SEQ_STATE_LISTENING:
2712 if (v != SEQ_START_TOKEN)
2713 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2714 break;
2715 case TCP_SEQ_STATE_TIME_WAIT:
2716 case TCP_SEQ_STATE_ESTABLISHED:
2717 if (v)
2718 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2719 break;
2720 }
2721 }
2722
2723 int tcp_seq_open(struct inode *inode, struct file *file)
2724 {
2725 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2726 struct tcp_iter_state *s;
2727 int err;
2728
2729 err = seq_open_net(inode, file, &afinfo->seq_ops,
2730 sizeof(struct tcp_iter_state));
2731 if (err < 0)
2732 return err;
2733
2734 s = ((struct seq_file *)file->private_data)->private;
2735 s->family = afinfo->family;
2736 s->last_pos = 0;
2737 return 0;
2738 }
2739 EXPORT_SYMBOL(tcp_seq_open);
2740
2741 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2742 {
2743 int rc = 0;
2744 struct proc_dir_entry *p;
2745
2746 afinfo->seq_ops.start = tcp_seq_start;
2747 afinfo->seq_ops.next = tcp_seq_next;
2748 afinfo->seq_ops.stop = tcp_seq_stop;
2749
2750 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2751 afinfo->seq_fops, afinfo);
2752 if (!p)
2753 rc = -ENOMEM;
2754 return rc;
2755 }
2756 EXPORT_SYMBOL(tcp_proc_register);
2757
2758 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2759 {
2760 remove_proc_entry(afinfo->name, net->proc_net);
2761 }
2762 EXPORT_SYMBOL(tcp_proc_unregister);
2763
2764 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2765 struct seq_file *f, int i, kuid_t uid, int *len)
2766 {
2767 const struct inet_request_sock *ireq = inet_rsk(req);
2768 long delta = req->expires - jiffies;
2769
2770 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2771 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2772 i,
2773 ireq->loc_addr,
2774 ntohs(inet_sk(sk)->inet_sport),
2775 ireq->rmt_addr,
2776 ntohs(ireq->rmt_port),
2777 TCP_SYN_RECV,
2778 0, 0, /* could print option size, but that is af dependent. */
2779 1, /* timers active (only the expire timer) */
2780 jiffies_delta_to_clock_t(delta),
2781 req->num_timeout,
2782 from_kuid_munged(seq_user_ns(f), uid),
2783 0, /* non standard timer */
2784 0, /* open_requests have no inode */
2785 atomic_read(&sk->sk_refcnt),
2786 req,
2787 len);
2788 }
2789
2790 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2791 {
2792 int timer_active;
2793 unsigned long timer_expires;
2794 const struct tcp_sock *tp = tcp_sk(sk);
2795 const struct inet_connection_sock *icsk = inet_csk(sk);
2796 const struct inet_sock *inet = inet_sk(sk);
2797 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2798 __be32 dest = inet->inet_daddr;
2799 __be32 src = inet->inet_rcv_saddr;
2800 __u16 destp = ntohs(inet->inet_dport);
2801 __u16 srcp = ntohs(inet->inet_sport);
2802 int rx_queue;
2803
2804 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2805 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2806 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2807 timer_active = 1;
2808 timer_expires = icsk->icsk_timeout;
2809 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2810 timer_active = 4;
2811 timer_expires = icsk->icsk_timeout;
2812 } else if (timer_pending(&sk->sk_timer)) {
2813 timer_active = 2;
2814 timer_expires = sk->sk_timer.expires;
2815 } else {
2816 timer_active = 0;
2817 timer_expires = jiffies;
2818 }
2819
2820 if (sk->sk_state == TCP_LISTEN)
2821 rx_queue = sk->sk_ack_backlog;
2822 else
2823 /*
2824 		 * because we don't lock the socket, we might find a transient negative value
2825 */
2826 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2827
2828 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2829 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2830 i, src, srcp, dest, destp, sk->sk_state,
2831 tp->write_seq - tp->snd_una,
2832 rx_queue,
2833 timer_active,
2834 jiffies_delta_to_clock_t(timer_expires - jiffies),
2835 icsk->icsk_retransmits,
2836 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2837 icsk->icsk_probes_out,
2838 sock_i_ino(sk),
2839 atomic_read(&sk->sk_refcnt), sk,
2840 jiffies_to_clock_t(icsk->icsk_rto),
2841 jiffies_to_clock_t(icsk->icsk_ack.ato),
2842 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2843 tp->snd_cwnd,
2844 sk->sk_state == TCP_LISTEN ?
2845 (fastopenq ? fastopenq->max_qlen : 0) :
2846 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2847 len);
2848 }
2849
2850 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2851 struct seq_file *f, int i, int *len)
2852 {
2853 __be32 dest, src;
2854 __u16 destp, srcp;
2855 long delta = tw->tw_ttd - jiffies;
2856
2857 dest = tw->tw_daddr;
2858 src = tw->tw_rcv_saddr;
2859 destp = ntohs(tw->tw_dport);
2860 srcp = ntohs(tw->tw_sport);
2861
2862 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2863 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2864 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2865 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2866 atomic_read(&tw->tw_refcnt), tw, len);
2867 }
2868
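/* Every line emitted by tcp4_seq_show() is padded to a fixed width of
 * TMPSZ - 1 characters (plus newline) so /proc/net/tcp records have a
 * constant size.
 */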
2869 #define TMPSZ 150
2870
2871 static int tcp4_seq_show(struct seq_file *seq, void *v)
2872 {
2873 struct tcp_iter_state *st;
2874 int len;
2875
2876 if (v == SEQ_START_TOKEN) {
2877 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2878 " sl local_address rem_address st tx_queue "
2879 "rx_queue tr tm->when retrnsmt uid timeout "
2880 "inode");
2881 goto out;
2882 }
2883 st = seq->private;
2884
2885 switch (st->state) {
2886 case TCP_SEQ_STATE_LISTENING:
2887 case TCP_SEQ_STATE_ESTABLISHED:
2888 get_tcp4_sock(v, seq, st->num, &len);
2889 break;
2890 case TCP_SEQ_STATE_OPENREQ:
2891 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2892 break;
2893 case TCP_SEQ_STATE_TIME_WAIT:
2894 get_timewait4_sock(v, seq, st->num, &len);
2895 break;
2896 }
2897 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2898 out:
2899 return 0;
2900 }
2901
2902 static const struct file_operations tcp_afinfo_seq_fops = {
2903 .owner = THIS_MODULE,
2904 .open = tcp_seq_open,
2905 .read = seq_read,
2906 .llseek = seq_lseek,
2907 .release = seq_release_net
2908 };
2909
2910 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2911 .name = "tcp",
2912 .family = AF_INET,
2913 .seq_fops = &tcp_afinfo_seq_fops,
2914 .seq_ops = {
2915 .show = tcp4_seq_show,
2916 },
2917 };
2918
2919 static int __net_init tcp4_proc_init_net(struct net *net)
2920 {
2921 return tcp_proc_register(net, &tcp4_seq_afinfo);
2922 }
2923
2924 static void __net_exit tcp4_proc_exit_net(struct net *net)
2925 {
2926 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2927 }
2928
2929 static struct pernet_operations tcp4_net_ops = {
2930 .init = tcp4_proc_init_net,
2931 .exit = tcp4_proc_exit_net,
2932 };
2933
2934 int __init tcp4_proc_init(void)
2935 {
2936 return register_pernet_subsys(&tcp4_net_ops);
2937 }
2938
2939 void tcp4_proc_exit(void)
2940 {
2941 unregister_pernet_subsys(&tcp4_net_ops);
2942 }
2943 #endif /* CONFIG_PROC_FS */
2944
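/* GRO receive hook for TCP over IPv4: validate the checksum against the
 * pseudo-header (using the device sum for CHECKSUM_COMPLETE, computing one
 * for CHECKSUM_NONE) and flush the packet on mismatch before handing it to
 * the generic tcp_gro_receive().
 */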
2945 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2946 {
2947 const struct iphdr *iph = skb_gro_network_header(skb);
2948 __wsum wsum;
2949 __sum16 sum;
2950
2951 switch (skb->ip_summed) {
2952 case CHECKSUM_COMPLETE:
2953 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2954 skb->csum)) {
2955 skb->ip_summed = CHECKSUM_UNNECESSARY;
2956 break;
2957 }
2958 flush:
2959 NAPI_GRO_CB(skb)->flush = 1;
2960 return NULL;
2961
2962 case CHECKSUM_NONE:
2963 wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
2964 skb_gro_len(skb), IPPROTO_TCP, 0);
2965 sum = csum_fold(skb_checksum(skb,
2966 skb_gro_offset(skb),
2967 skb_gro_len(skb),
2968 wsum));
2969 if (sum)
2970 goto flush;
2971
2972 skb->ip_summed = CHECKSUM_UNNECESSARY;
2973 break;
2974 }
2975
2976 return tcp_gro_receive(head, skb);
2977 }
2978
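/* GRO complete hook: seed th->check with the pseudo-header checksum for the
 * merged super-packet and mark it as TCPv4 GSO before the generic completion
 * runs.
 */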
2979 int tcp4_gro_complete(struct sk_buff *skb)
2980 {
2981 const struct iphdr *iph = ip_hdr(skb);
2982 struct tcphdr *th = tcp_hdr(skb);
2983
2984 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2985 iph->saddr, iph->daddr, 0);
2986 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2987
2988 return tcp_gro_complete(skb);
2989 }
2990
2991 struct proto tcp_prot = {
2992 .name = "TCP",
2993 .owner = THIS_MODULE,
2994 .close = tcp_close,
2995 .connect = tcp_v4_connect,
2996 .disconnect = tcp_disconnect,
2997 .accept = inet_csk_accept,
2998 .ioctl = tcp_ioctl,
2999 .init = tcp_v4_init_sock,
3000 .destroy = tcp_v4_destroy_sock,
3001 .shutdown = tcp_shutdown,
3002 .setsockopt = tcp_setsockopt,
3003 .getsockopt = tcp_getsockopt,
3004 .recvmsg = tcp_recvmsg,
3005 .sendmsg = tcp_sendmsg,
3006 .sendpage = tcp_sendpage,
3007 .backlog_rcv = tcp_v4_do_rcv,
3008 .release_cb = tcp_release_cb,
3009 .hash = inet_hash,
3010 .unhash = inet_unhash,
3011 .get_port = inet_csk_get_port,
3012 .enter_memory_pressure = tcp_enter_memory_pressure,
3013 .sockets_allocated = &tcp_sockets_allocated,
3014 .orphan_count = &tcp_orphan_count,
3015 .memory_allocated = &tcp_memory_allocated,
3016 .memory_pressure = &tcp_memory_pressure,
3017 .sysctl_wmem = sysctl_tcp_wmem,
3018 .sysctl_rmem = sysctl_tcp_rmem,
3019 .max_header = MAX_TCP_HEADER,
3020 .obj_size = sizeof(struct tcp_sock),
3021 .slab_flags = SLAB_DESTROY_BY_RCU,
3022 .twsk_prot = &tcp_timewait_sock_ops,
3023 .rsk_prot = &tcp_request_sock_ops,
3024 .h.hashinfo = &tcp_hashinfo,
3025 .no_autobind = true,
3026 #ifdef CONFIG_COMPAT
3027 .compat_setsockopt = compat_tcp_setsockopt,
3028 .compat_getsockopt = compat_tcp_getsockopt,
3029 #endif
3030 #ifdef CONFIG_MEMCG_KMEM
3031 .init_cgroup = tcp_init_cgroup,
3032 .destroy_cgroup = tcp_destroy_cgroup,
3033 .proto_cgroup = tcp_proto_cgroup,
3034 #endif
3035 };
3036 EXPORT_SYMBOL(tcp_prot);
3037
3038 static void __net_exit tcp_sk_exit(struct net *net)
3039 {
3040 int cpu;
3041
3042 for_each_possible_cpu(cpu)
3043 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
3044 free_percpu(net->ipv4.tcp_sk);
3045 }
3046
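/* Per-namespace init: create one raw TCP control socket per possible CPU
 * (used to send RSTs and ACKs that are not tied to a local socket) and set
 * the default ECN sysctl.
 */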
3047 static int __net_init tcp_sk_init(struct net *net)
3048 {
3049 int res, cpu;
3050
3051 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
3052 if (!net->ipv4.tcp_sk)
3053 return -ENOMEM;
3054
3055 for_each_possible_cpu(cpu) {
3056 struct sock *sk;
3057
3058 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
3059 IPPROTO_TCP, net);
3060 if (res)
3061 goto fail;
3062 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
3063 }
3064 net->ipv4.sysctl_tcp_ecn = 2;
3065 return 0;
3066
3067 fail:
3068 tcp_sk_exit(net);
3069
3070 return res;
3071 }
3072
3073 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
3074 {
3075 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
3076 }
3077
3078 static struct pernet_operations __net_initdata tcp_sk_ops = {
3079 .init = tcp_sk_init,
3080 .exit = tcp_sk_exit,
3081 .exit_batch = tcp_sk_exit_batch,
3082 };
3083
3084 void __init tcp_v4_init(void)
3085 {
3086 inet_hashinfo_init(&tcp_hashinfo);
3087 if (register_pernet_subsys(&tcp_sk_ops))
3088 panic("Failed to create the TCP control socket.\n");
3089 }