[IPV6]: Generalise __tcp_v6_hash, renaming it to __inet6_hash
net/ipv6/tcp_ipv6.c
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void	tcp_v6_send_reset(struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static int	tcp_v6_xmit(struct sk_buff *skb, int ipfragok);

static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;

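/* Bind conflict test handed to inet_csk_get_port() below: a socket sk2
 * already bound to the port conflicts with sk only if the two could
 * receive on the same device (at least one is unbound, or both are
 * bound to the same interface), reuse does not save them (one lacks
 * SO_REUSEADDR, or sk2 is listening) and their receive addresses
 * overlap according to ipv6_rcv_saddr_equal().
 */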
int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb)
{
	const struct sock *sk2;
	const struct hlist_node *node;

	/* We must walk the whole port owner list in this case. -DaveM */
	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
		    (!sk->sk_reuse || !sk2->sk_reuse ||
		     sk2->sk_state == TCP_LISTEN) &&
		    ipv6_rcv_saddr_equal(sk, sk2))
			break;
	}

	return node != NULL;
}

static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->af_specific == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}

/*
 * Open request hash tables.
 */

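/* The SYN queue is a private hash per listening socket: fold the
 * peer's 128-bit address and 16-bit port through the jhash mix,
 * seeded with the listener's random hash_rnd.  The final mask
 * relies on TCP_SYNQ_HSIZE being a power of two.
 */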
static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd)
{
	u32 a, b, c;

	a = raddr->s6_addr32[0];
	b = raddr->s6_addr32[1];
	c = raddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += raddr->s6_addr32[3];
	b += (u32) rport;
	__jhash_mix(a, b, c);

	return c & (TCP_SYNQ_HSIZE - 1);
}

static struct request_sock *tcp_v6_search_req(const struct sock *sk,
					      struct request_sock ***prevp,
					      __u16 rport,
					      struct in6_addr *raddr,
					      struct in6_addr *laddr,
					      int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct tcp6_request_sock *treq = tcp6_rsk(req);

		if (inet_rsk(req)->rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
		    (!treq->iif || treq->iif == iif)) {
			BUG_TRAP(req->sk == NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}

static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IPV6)) {
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    skb->h.th->dest,
						    skb->h.th->source);
	} else {
		return secure_tcp_sequence_number(skb->nh.iph->daddr,
						  skb->nh.iph->saddr,
						  skb->h.th->dest,
						  skb->h.th->source);
	}
}

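/* Check that the four-tuple picked for an outgoing connection is not
 * already in use.  A TIME-WAIT socket on the same tuple may be recycled
 * when timestamps permit (see the sysctl_tcp_tw_reuse handling below);
 * on success the socket is hashed into the established chain under the
 * bucket lock, otherwise -EADDRNOTAVAIL is returned.
 */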
static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
				      struct inet_timewait_sock **twp)
{
	struct inet_sock *inet = inet_sk(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	const struct in6_addr *daddr = &np->rcv_saddr;
	const struct in6_addr *saddr = &np->daddr;
	const int dif = sk->sk_bound_dev_if;
	const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
	unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
	struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;

	prefetch(head->chain.first);
	write_lock(&head->lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
		const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk2);

		tw = inet_twsk(sk2);

		if (*((__u32 *)&(tw->tw_dport)) == ports &&
		    sk2->sk_family == PF_INET6 &&
		    ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
		    ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
			const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
			struct tcp_sock *tp = tcp_sk(sk);

			if (tcptw->tw_ts_recent_stamp &&
			    (!twp ||
			     (sysctl_tcp_tw_reuse &&
			      xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
				/* See comment in tcp_ipv4.c */
				tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
				if (!tp->write_seq)
					tp->write_seq = 1;
				tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
				tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
				sock_hold(sk2);
				goto unique;
			} else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sk->sk_hash = hash;
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(&head->lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, &tcp_death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}
	return 0;

not_unique:
	write_unlock(&head->lock);
	return -EADDRNOTAVAIL;
}

static inline u32 tcpv6_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);

	return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32,
					   np->daddr.s6_addr32,
					   inet->dport);
}

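/* Pick and bind a local port for a connect().  With no port given, the
 * ephemeral range is scanned from a hash-keyed offset; a port is taken
 * once its bind bucket is newly created (fastreuse forced to -1) or
 * the resulting tuple passes the uniqueness check above.
 */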
static int tcp_v6_hash_connect(struct sock *sk)
{
	unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;

	if (!snum) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int range = high - low;
		int i;
		int port;
		static u32 hint;
		u32 offset = hint + tcpv6_port_offset(sk);
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		local_bh_disable();
		for (i = 1; i <= range; i++) {
			port = low + (i + offset) % range;
			head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->port == port) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!__tcp_v6_check_established(sk,
									port,
									&tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(port);
			__inet6_hash(&tcp_hashinfo, sk);
		}
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);

	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		__inet6_hash(&tcp_hashinfo, sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk the established hash table. */
		ret = __tcp_v6_check_established(sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}

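/* connect() handler.  A v4-mapped destination is handed over to
 * tcp_v4_connect() with the af_specific ops switched to ipv6_mapped;
 * a native IPv6 destination is routed here, honouring flow labels
 * and any type 0 routing header configured on the socket.
 */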
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = tp->ext_header_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		tp->af_specific = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			tp->ext_header_len = exthdrlen;
			tp->af_specific = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	ip6_dst_store(sk, dst, NULL);
	sk->sk_route_caps = dst->dev->features &
			    ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	tp->ext_header_len = 0;
	if (np->opt)
		tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = tcp_v6_hash_connect(sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

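/* ICMPv6 error handler.  PKT_TOOBIG revalidates the cached route and
 * lets tcp_sync_mss()/tcp_simple_retransmit() react to the new MTU;
 * other types are converted via icmpv6_err_convert() and reported to
 * the socket, or matched against a pending request sock when the
 * error hits a listener.
 */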
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (tp->pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr,
					&hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


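/* Build and send the SYN+ACK for a queued request.  If no route was
 * passed in, one is looked up from the request, optionally through the
 * inverted source route taken from the SYN when the listener set
 * rxopt.bits.osrcrt == 2.
 */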
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct tcp6_request_sock *treq = tcp6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (tcp6_rsk(req)->pktopts)
		kfree_skb(tcp6_rsk(req)->pktopts);
}

static struct request_sock_ops tcp6_request_sock_ops = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (np->rxopt.all) {
		if ((opt->hop && (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
		    ((IPV6_FLOWINFO_MASK & *(u32 *)skb->nh.raw) && np->rxopt.bits.rxflow) ||
		    (opt->srcrt && (np->rxopt.bits.srcrt || np->rxopt.bits.osrcrt)) ||
		    ((opt->dst1 || opt->dst0) && (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts)))
			return 1;
	}
	return 0;
}


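/* Fill in the TCP checksum on transmit.  With CHECKSUM_HW only the
 * pseudo-header sum is computed and skb->csum records where the device
 * must store the final checksum; otherwise it is completed in software
 * over the whole header and payload.
 */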
static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
			      struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff << 2,
							 skb->csum));
	}
}


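/* Answer an incoming segment with a RST.  A RST is never sent in
 * response to another RST nor to a non-unicast destination, and the
 * reply is routed and transmitted without any socket context (note
 * the NULL sk passed to ip6_dst_lookup() and ip6_xmit()).
 */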
static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1) / 4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff << 2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {

		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(NULL, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += 3 * 4;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32 *)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(NULL, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}


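/* A segment arrived for a listening socket: look for a matching
 * request sock first, then for an established child created by an
 * earlier exchange, and finally fall back to the listener itself.
 */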
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr,
				&skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
	inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
}


/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
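/* Process an incoming SYN: allocate a request sock, record the peer's
 * addresses and any IPv6 options that must be reflected, pick the ISN,
 * send the SYN+ACK and hash the request into the listener's SYN queue.
 */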
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = tcp6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(sk, skb);

	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	tcp_v6_synq_add(sk, req);

	return 0;

drop:
	if (req)
		reqsk_free(req);

	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0; /* don't send reset */
}

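/* Create the child socket once the handshake completes.  For a
 * v4-mapped peer this delegates to tcp_v4_syn_recv_sock() and rewrites
 * the addresses as ::ffff:a.b.c.d; for native IPv6 it routes the
 * reply path, clones the pktoptions saved from the SYN and inherits
 * the listener's IPv6 options.
 */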
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct tcp6_request_sock *treq = tcp6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		newtp->af_specific = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = inet6_iif(skb);
		newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 af_tcp.af_specific.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, newtp->pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	ip6_dst_store(newsk, dst, NULL);
	newsk->sk_route_caps = dst->dev->features &
			       ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	newtp->ext_header_len = 0;
	if (newnp->opt)
		newtp->ext_header_len = newnp->opt->opt_nflen +
					newnp->opt->opt_flen;

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

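/* Receive-side checksum setup.  Hardware-summed packets are verified
 * against the pseudo-header at once; for the rest the pseudo-header
 * sum is parked in skb->csum, and only short packets (<= 76 bytes)
 * are fully verified here, longer ones lazily later.
 */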
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}

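/* Main receive entry point: validate header length and checksum, fill
 * in the TCP control block, look up the owning socket and either
 * process the segment directly, prequeue it, or park it on the
 * backlog of a user-locked socket.  TIME-WAIT sockets are handled at
 * do_time_wait below.
 */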
static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v6_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(skb);
	}

discard_it:

	/*
	 * Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &skb->nh.ipv6h->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_rebuild_header(struct sock *sk)
{
	int err;
	struct dst_entry *dst;
	struct ipv6_pinfo *np = inet6_sk(sk);

	dst = __sk_dst_check(sk, np->dst_cookie);

	if (dst == NULL) {
		struct inet_sock *inet = inet_sk(sk);
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
		ipv6_addr_copy(&fl.fl6_src, &np->saddr);
		fl.fl6_flowlabel = np->flow_label;
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet->dport;
		fl.fl_ip_sport = inet->sport;

		if (np->opt && np->opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err) {
			sk->sk_route_caps = 0;
			return err;
		}
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
			sk->sk_err_soft = -err;
			return err;
		}

		ip6_dst_store(sk, dst, NULL);
		sk->sk_route_caps = dst->dev->features &
				    ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
	}

	return 0;
}

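/* Transmit hook for the generic TCP output path: revalidate the cached
 * route (looking one up again as in tcp_v6_rebuild_header() when the
 * cookie went stale) and pass the segment to ip6_xmit() together with
 * the socket's IPv6 options.
 */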
static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi fl;
	struct dst_entry *dst;
	struct in6_addr *final_p = NULL, final;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, &np->saddr);
	fl.fl6_flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_sport = inet->sport;
	fl.fl_ip_dport = inet->dport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	dst = __sk_dst_check(sk, np->dst_cookie);

	if (dst == NULL) {
		int err = ip6_dst_lookup(sk, &dst, &fl);

		if (err) {
			sk->sk_err_soft = -err;
			return err;
		}

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
			sk->sk_route_caps = 0;
			return err;
		}

		ip6_dst_store(sk, dst, NULL);
		sk->sk_route_caps = dst->dev->features &
				    ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
	}

	skb->dst = dst_clone(dst);

	/* Restore final destination back after routing done */
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);

	return ip6_xmit(sk, skb, &fl, np->opt, 0);
}

static void v6_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
	sin6->sin6_port = inet_sk(sk)->dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = 0;
	if (sk->sk_bound_dev_if &&
	    ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
		sin6->sin6_scope_id = sk->sk_bound_dev_if;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

static struct tcp_func ipv6_specific = {
	.queue_xmit	=	tcp_v6_xmit,
	.send_check	=	tcp_v6_send_check,
	.rebuild_header	=	tcp_v6_rebuild_header,
	.conn_request	=	tcp_v6_conn_request,
	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
	.remember_stamp	=	tcp_v6_remember_stamp,
	.net_header_len	=	sizeof(struct ipv6hdr),

	.setsockopt	=	ipv6_setsockopt,
	.getsockopt	=	ipv6_getsockopt,
	.addr2sockaddr	=	v6_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in6)
};

/*
 *	TCP over IPv4 via INET6 API
 */

static struct tcp_func ipv6_mapped = {
	.queue_xmit	=	ip_queue_xmit,
	.send_check	=	tcp_v4_send_check,
	.rebuild_header	=	inet_sk_rebuild_header,
	.conn_request	=	tcp_v6_conn_request,
	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
	.remember_stamp	=	tcp_v4_remember_stamp,
	.net_header_len	=	sizeof(struct iphdr),

	.setsockopt	=	ipv6_setsockopt,
	.getsockopt	=	ipv6_getsockopt,
	.addr2sockaddr	=	v6_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in6)
};



/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them. -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	tp->af_specific = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}

/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	struct in6_addr *dest, *src;
	int ttd = req->expires - jiffies;

	if (ttd < 0)
		ttd = 0;

	src = &tcp6_rsk(req)->loc_addr;
	dest = &tcp6_rsk(req)->rmt_addr;
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = &tcp6tw->tw_v6_daddr;
	src   = &tcp6tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

#ifdef CONFIG_PROC_FS
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_obj_size		= sizeof(struct tcp6_timewait_sock),
	.rsk_prot		= &tcp6_request_sock_ops,
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT,
};

void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);
}