ipv4: Pull GSO registration out of inet_init()
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv6 / tcp_ipv6.c
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include "ip6_offload.h"

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
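
/*
 * Editor's note: secure_tcpv6_sequence_number() implements the RFC 6528
 * scheme: ISN = F(saddr, daddr, sport, dport, secret) plus a clock that
 * ticks roughly every 4 microseconds.  Below is a hedged, illustrative
 * userspace model of that idea only; toy_mix() is a made-up stand-in for
 * the kernel's keyed hash, not its real code.
 */
#if 0 /* illustrative userspace sketch, not part of the kernel build */
#include <stdint.h>
#include <time.h>

/* Toy mixing step (FNV-1a style); purely for illustration. */
static uint32_t toy_mix(const void *buf, size_t len, uint32_t h)
{
	const uint8_t *p = buf;

	while (len--)
		h = (h ^ *p++) * 0x01000193u;
	return h;
}

static uint32_t toy_isn(const uint8_t saddr[16], const uint8_t daddr[16],
			uint16_t sport, uint16_t dport, uint32_t secret)
{
	struct timespec ts;
	uint32_t h = secret;

	h = toy_mix(saddr, 16, h);
	h = toy_mix(daddr, 16, h);
	h = toy_mix(&sport, sizeof(sport), h);
	h = toy_mix(&dport, sizeof(dport), h);
	clock_gettime(CLOCK_MONOTONIC, &ts);
	/* RFC 6528: add a clock ticking about every 4 microseconds */
	return h + (uint32_t)ts.tv_sec * 250000u + (uint32_t)(ts.tv_nsec / 4000);
}
#endif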

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
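
/*
 * Editor's note: the IPV6_ADDR_MAPPED branch above is what lets a single
 * AF_INET6 socket carry TCP over IPv4.  A hedged userspace sketch of the
 * API surface involved (standard socket calls only; the peer address below
 * is an arbitrary TEST-NET-1 example, not anything from this file):
 */
#if 0 /* illustrative userspace sketch, not part of the kernel build */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_v4mapped(void)
{
	struct sockaddr_in6 sa;
	int v6only = 0;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* With IPV6_V6ONLY=1 this connect would fail with ENETUNREACH,
	 * matching the __ipv6_only_sock() check in tcp_v6_connect(). */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, sizeof(v6only));

	memset(&sa, 0, sizeof(sa));
	sa.sin6_family = AF_INET6;
	sa.sin6_port = htons(80);
	/* v4-mapped form of 192.0.2.1 */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);

	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif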

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
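
/*
 * Editor's note: from userspace, the path MTU the kernel has learned for a
 * connected socket (updated by the ICMPV6_PKT_TOOBIG handling below) can be
 * read back with the IPV6_MTU getsockopt, documented in ipv6(7).  A hedged
 * sketch, error handling trimmed for brevity:
 */
#if 0 /* illustrative userspace sketch, not part of the kernel build */
#include <netinet/in.h>
#include <sys/socket.h>

static int query_path_mtu(int connected_fd)
{
	int mtu = 0;
	socklen_t len = sizeof(mtu);

	if (getsockopt(connected_fd, IPPROTO_IPV6, IPV6_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;	/* e.g. 1280 after a Packet Too Big on a tunnel path */
}
#endif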

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
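
/*
 * Editor's note: the np->recverr test above corresponds to the IPV6_RECVERR
 * socket option; when it is set, converted ICMPv6 errors are also queued on
 * the socket error queue.  A hedged userspace sketch of draining that queue
 * (glibc exposes the structures via linux/errqueue.h):
 */
#if 0 /* illustrative userspace sketch, not part of the kernel build */
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void drain_error_queue(int fd)
{
	char data[256], cbuf[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IPV6 &&
		    cmsg->cmsg_type == IPV6_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cmsg);
			/* ee->ee_type / ee->ee_code carry the ICMPv6
			 * type and code that triggered the error. */
			(void)ee;
		}
	}
}
#endif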


static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6->daddr = treq->rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	struct flowi6 fl6;
	int res;

	res = tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
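
/*
 * Editor's note: tcp_v6_parse_md5_keys() is the kernel half of the
 * TCP_MD5SIG socket option (RFC 2385).  A hedged userspace sketch of
 * installing a key for a peer before connect() or listen():
 */
#if 0 /* illustrative userspace sketch, not part of the kernel build */
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int install_md5_key(int fd, const char *peer, const char *secret)
{
	struct tcp_md5sig md5;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin6->sin6_family = AF_INET6;
	if (inet_pton(AF_INET6, peer, &sin6->sin6_addr) != 1)
		return -1;
	md5.tcpm_keylen = strlen(secret);	/* must be <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif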

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	= tcp_v6_rtx_synack,
	.send_ack	= tcp_v6_reqsk_send_ack,
	.destructor	= tcp_v6_reqsk_destructor,
	.send_reset	= tcp_v6_send_reset,
	.syn_ack_timeout = tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	= tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
};
#endif

static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack,
				 u32 win, u32 ts, struct tcp_md5sig_key *key,
				 int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when it is for an RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked with the md5 hash using the
		 * found key; no RST is generated if the md5 hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->daddr,
					    ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_rsk(req)->listener = NULL;
	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 tcp code
		 * worked with the IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...
	 *
	 * First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever, but we
	 * do one more thing here: reattach optmem to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->num_retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newnp->daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76)
		return __skb_checksum_complete(skb);

	return 0;
}
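
/*
 * Editor's note: tcp_v6_check() above folds the RFC 2460 section 8.1
 * pseudo-header (source address, destination address, upper-layer length,
 * next header) into the TCP one's-complement checksum.  A self-contained
 * userspace sketch of the same arithmetic, assuming the caller zeroed the
 * checksum field inside the segment first:
 */
#if 0 /* illustrative userspace sketch, not part of the kernel build */
#include <stddef.h>
#include <stdint.h>

/* Fold a 32-bit accumulator into a final 16-bit one's-complement sum. */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint16_t tcp6_checksum(const uint8_t saddr[16], const uint8_t daddr[16],
			      const uint8_t *tcp, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* IPv6 pseudo-header: addresses, length, next header (TCP = 6) */
	for (i = 0; i < 16; i += 2) {
		sum += (saddr[i] << 8) | saddr[i + 1];
		sum += (daddr[i] << 8) | daddr[i + 1];
	}
	sum += (uint32_t)len;
	sum += 6;

	/* TCP header + payload, with th->check already zeroed */
	for (i = 0; i + 1 < len; i += 2)
		sum += (tcp[i] << 8) | tcp[i + 1];
	if (len & 1)
		sum += tcp[len - 1] << 8;

	return csum_fold32(sum);
}
#endif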

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	 * goes to IPv4 receive handler and backlogged.
	 * From backlog it always goes here. Kerboom...
	 * Fortunately, tcp_rcv_established and rcv_established
	 * handle them correctly, but that is not the case with
	 * tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.
	 *
	 * Yes, guys, it is the only place in our code, where we
	 * may make it not affecting IPv4.
	 * The rest of the code is protocol independent,
	 * and I do not like the idea of uglifying IPv4.
	 *
	 * Actually, the whole idea behind IPV6_PKTOPTIONS does not
	 * look very well thought out. For now we latch the options
	 * received in the last packet enqueued by tcp. Feel free to
	 * propose a better solution.
	 *					--ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?
	 *
	 * 1. skb was enqueued by tcp.
	 * 2. skb is added to tail of read queue, rather than out of order.
	 * 3. socket is not in passive state.
	 * 4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
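
/*
 * Editor's note: the "Stevens' IPV6_PKTOPTIONS" latching above is what the
 * Linux-specific IPV6_PKTOPTIONS getsockopt reads back on a TCP socket: the
 * ancillary data of the last latched packet is returned as a block of cmsgs.
 * A hedged userspace sketch (on glibc, _GNU_SOURCE must be defined before
 * any include to get struct in6_pktinfo):
 */
#if 0 /* illustrative userspace sketch, not part of the kernel build */
#define _GNU_SOURCE
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int last_pkt_ifindex(int fd)
{
	unsigned char cbuf[512];
	socklen_t clen = sizeof(cbuf);
	struct msghdr msg;
	struct cmsghdr *cmsg;
	int on = 1;

	/* Ask the kernel to latch arrival info for received segments. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));

	if (getsockopt(fd, IPPROTO_IPV6, IPV6_PKTOPTIONS, cbuf, &clen) < 0)
		return -1;

	/* Walk the returned buffer as if it were recvmsg() control data. */
	memset(&msg, 0, sizeof(msg));
	msg.msg_control = cbuf;
	msg.msg_controllen = clen;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IPV6 &&
		    cmsg->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *pi =
				(struct in6_pktinfo *)CMSG_DATA(cmsg);
			return pi->ipi6_ifindex;
		}
	}
	return 0;
}
#endif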

static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 * Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	long delta = tw->tw_ttd - jiffies;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
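
/*
 * Editor's note: the seq_printf() format above defines the /proc/net/tcp6
 * line layout.  A hedged userspace sketch of pulling out the local address,
 * port and state; note the four 8-hex-digit address words are the kernel's
 * s6_addr32 words, so on little-endian hosts each word is byte-swapped:
 */
#if 0 /* illustrative userspace sketch, not part of the kernel build */
#include <stdio.h>

static void dump_tcp6_states(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return;
	fgets(line, sizeof(line), f);		/* skip the header line */
	while (fgets(line, sizeof(line), f)) {
		unsigned int a0, a1, a2, a3, port, state;

		if (sscanf(line, "%*d: %8x%8x%8x%8x:%4x "
				 "%*8x%*8x%*8x%*8x:%*4x %2x",
			   &a0, &a1, &a2, &a3, &port, &state) == 6)
			printf("local %08x%08x%08x%08x:%u state %u\n",
			       a0, a1, a2, a3, port, state);
	}
	fclose(f);
}
#endif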

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	= tcp_v6_early_demux,
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = tcpv6_offload_init();
	if (ret)
		goto out;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out_offload;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
out_offload:
	tcpv6_offload_cleanup();
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	tcpv6_offload_cleanup();
}