/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}

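/*
 * Hash the socket into the established/listening tables.  A socket
 * using the v4-mapped ops is handed to the IPv4 hash routine; native
 * IPv6 sockets go through __inet6_hash() with BHs disabled, since the
 * hash buckets are also touched from softirq context.
 */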
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

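/*
 * Connection setup, in outline: resolve any flow label, map
 * connect(::) to the loopback address, reject multicast, fall back to
 * tcp_v4_connect() for v4-mapped destinations, then route the flow,
 * pick a source address, hash into the connect tables and send the
 * SYN with a secure initial sequence number.
 */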
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

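/*
 * ICMPv6 error handler.  Finds the socket the quoted TCP header
 * belongs to, drops errors that are out of window or blocked by
 * min_hopcount, handles NDISC redirects and PMTU updates inline, and
 * converts the remaining type/code pairs into a socket error.
 */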
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:	/* Cannot happen.
				   It can, e.g. if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6->daddr = treq->rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	struct flowi6 fl6;
	int res;

	res = tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

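/*
 * RFC 2385 MD5 signatures cover a pseudo-header ahead of the TCP
 * segment.  For IPv6 (cf. RFC 2460, section 8.1) that is, roughly:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;		-- TCP length
 *		__be32		protocol;	-- IPPROTO_TCP
 *	};
 *
 * The helper below feeds exactly this block into the MD5 hash state
 * before the TCP header and payload are mixed in.
 */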
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

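/*
 * Build and send a bare TCP reply (a RST, or an ACK on behalf of a
 * timewait socket) on a freshly allocated skb.  The segment echoes
 * the addresses and ports of the packet it answers, optionally
 * carries timestamp and MD5 options, and is routed through the
 * per-namespace control socket rather than the receiving socket.
 */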
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost.  Try to find the listening socket
		 * through the source port, and then the md5 key through that
		 * listening socket.  We do not lose security here: the
		 * incoming packet is checked against the md5 hash of the key
		 * we find, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->daddr,
					    ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}

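/*
 * For a packet that arrived on a listening socket, decide where it
 * really belongs: a pending request in the SYN queue, an already
 * established child that won the lookup race, or (for ACKs carrying a
 * syncookie) a connection reconstructed from the cookie.
 */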
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
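/*
 * Handle an incoming SYN on a listening socket: fall back to
 * tcp_v4_conn_request() for v4-mapped traffic, switch to syncookies
 * when the SYN queue overflows, allocate and initialise a request
 * sock, and answer with a SYNACK whose ISN comes either from the
 * cookie generator or from secure_tcpv6_sequence_number().
 */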
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_rsk(req)->listener = NULL;
	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

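/*
 * Create the child socket once the handshake completes.  The
 * v4-mapped branch clones an IPv4 child and then points it back at
 * the IPv6 ops; the native branch routes the flow, copies addresses
 * and pktoptions from the request sock, duplicates listener options
 * and any MD5 key, and hashes the child into the established table.
 */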
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place.  Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->num_retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

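/*
 * Checksum handling on receive: a CHECKSUM_COMPLETE value from the
 * driver is verified right here; otherwise the pseudo-header sum is
 * seeded and only short packets (<= 76 bytes) are fully checksummed
 * now, presumably leaving the rest to be verified when the data is
 * actually copied.
 */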
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

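/*
 * Main receive routine, called for every TCP segment IPv6 delivers to
 * us.  Validates the header and checksum, looks the owning socket up,
 * applies hop-limit/xfrm/socket-filter policy, and then either
 * processes the segment directly, queues it on the prequeue, or parks
 * it on the backlog when the socket is owned by user context.
 * TIME_WAIT sockets get their own mini state machine at the bottom.
 */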
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

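/*
 * Early demux: at IP receive time, try to match the packet to an
 * established socket so the socket and its cached rx dst can be
 * attached to the skb before the full receive path runs.
 */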
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest = &np->daddr;
	src = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	long delta = tw->tw_ttd - jiffies;

	dest = &tw6->tw_v6_daddr;
	src = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}