/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support the IPV6_V6ONLY socket option,
 *	Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets
 *					to bind to a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif

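/*
 * Grab a local port for @sk, using the IPv6-aware bind-conflict check so
 * that IPv4/IPv6 sharing of a port honours IPV6_V6ONLY.
 */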
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

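/*
 * Hash the socket into the established table; v4-mapped sockets are
 * handed to the IPv4 hash routine instead.
 */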
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

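/*
 * Choose the initial sequence number for a connection from a keyed hash
 * of the {saddr, daddr, sport, dport} tuple, so ISNs are hard to predict
 * off-path.
 */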
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

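/*
 * Active open: resolve the flow label, fall back to tcp_v4_connect() for
 * v4-mapped destinations, route the flow (honouring any routing header),
 * pick a source address, hash the socket, then send the SYN.
 */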
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = __xfrm_lookup(&dst, &fl, sk, 1)) < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

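/*
 * ICMPv6 error handler: map the offending packet back to its socket,
 * shrink the path MTU on ICMPV6_PKT_TOOBIG, and report other errors to
 * the socket (or drop the matching request_sock for embryonic connections).
 */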
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

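/*
 * Build and transmit the SYN|ACK for a pending connection request:
 * route towards the peer (following a source routing header if the
 * listener has one), checksum the segment and send it with ip6_xmit().
 */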
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	if (dst == NULL) {
		opt = np->opt;
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}

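/*
 * TCP-MD5 (RFC 2385) support: a per-socket key table keyed by peer
 * address.  IPv6 peers live in md5sig_info->keys6[]; v4-mapped peers are
 * delegated to the IPv4 table.
 */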
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
			return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

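/*
 * Install (or replace) the key for @peer.  The keys6[] array is grown by
 * one slot at a time; on the first key the socket also loses GSO, since
 * signed segments cannot be offloaded.
 */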
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp6_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = (struct tcp6_md5sig_key *) tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		tcp_alloc_md5sig_pool();
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;

				tcp_free_md5sig_pool();

				return 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
		}
	}
	return -ENOENT;
}

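/* Flush every v4 and v6 key on the socket (used at socket teardown). */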
static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

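/*
 * setsockopt(TCP_MD5SIG) handler: install or remove a signature key for
 * one peer.  A minimal userspace sketch (fd and peer_sin6 are
 * hypothetical names, not part of this file):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */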
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

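/*
 * Compute the RFC 2385 MD5 over: the IPv6 pseudo-header, the TCP header
 * with its checksum zeroed, the payload (if any), and finally the shared
 * key, using the per-CPU crypto pool.  On failure the hash is zeroed.
 */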
static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   struct tcphdr *th, int protocol,
				   int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 cksum;
	struct tcp_md5sig_pool *hp;
	struct tcp6_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	hp = tcp_get_md5sig_pool();
	if (!hp) {
		printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
		goto clear_hash_noput;
	}
	bp = &hp->md5_blk.ip6;
	desc = &hp->md5_desc;

	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->len = htonl(tcplen);
	bp->protocol = htonl(protocol);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. TCP header, excluding options */
	cksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(*th));
	nbytes += sizeof(*th);

	/* 3. TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		u8 *data = (u8 *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. shared key */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	/* Now store the hash into the packet */
	err = crypto_hash_init(desc);
	if (err) {
		printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
		goto clear_hash;
	}
	err = crypto_hash_update(desc, sg, nbytes);
	if (err) {
		printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
		goto clear_hash;
	}
	err = crypto_hash_final(desc, md5_hash);
	if (err) {
		printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
		goto clear_hash;
	}

	/* Reset header, and free up the crypto */
	tcp_put_md5sig_pool();
	th->check = cksum;
out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}

static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				struct sock *sk,
				struct dst_entry *dst,
				struct request_sock *req,
				struct tcphdr *th, int protocol,
				int tcplen)
{
	struct in6_addr *saddr, *daddr;

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	}
	return tcp_v6_do_calc_md5_hash(md5_hash, key,
				       saddr, daddr,
				       th, protocol, tcplen);
}

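/*
 * Verify the MD5 option on an incoming segment: a key with no option,
 * an option with no key, or a signature mismatch all fail the segment.
 * Returns 1 to drop, 0 to accept.
 */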
static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int length = (th->doff << 2) - sizeof(*th);
	int genhash;
	u8 *ptr;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);

	/* If the TCP option is too short, we can short cut */
	if (length < TCPOLEN_MD5SIG)
		return hash_expected ? 1 : 0;

	/* parse options */
	ptr = (u8 *)(th + 1);
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			goto done_opts;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				goto done_opts;
			if (opcode == TCPOPT_MD5SIG) {
				hash_location = ptr;
				goto done_opts;
			}
		}
		ptr += opsize - 2;
		length -= opsize;
	}

done_opts:
	/* do we have a hash as expected? */
	if (!hash_expected) {
		if (!hash_location)
			return 0;
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	if (!hash_location) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_do_calc_md5_hash(newhash,
					  hash_expected,
					  &ip6h->saddr, &ip6h->daddr,
					  th, sk->sk_protocol,
					  skb->len);
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       genhash ? "failed" : "mismatch",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	= tcp_v6_send_synack,
	.send_ack	= tcp_v6_reqsk_send_ack,
	.destructor	= tcp_v6_reqsk_destructor,
	.send_reset	= tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	= tcp_v6_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

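/*
 * Fill in the TCP checksum for an outgoing segment, either priming it
 * for hardware offload (CHECKSUM_PARTIAL) or computing it in software.
 */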
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff << 2,
							 skb->csum));
	}
}

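/* Prepare a GSO segment's checksum fields before segmentation. */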
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

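/*
 * Send a RST in response to @skb, without requiring a full socket:
 * addresses and ports are mirrored from the offending segment, the
 * sequence numbers follow RFC 793, and an MD5 option is appended when a
 * key is known for the peer.
 */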
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(*th);
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
	else
		key = NULL;

	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff << 2));
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		__be32 *opt = (__be32 *)(t1 + 1);
		opt[0] = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
					&ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr,
					t1, IPPROTO_TCP, tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {

		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

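/*
 * Send a bare ACK (used for timewait ACKs and to answer request_sock
 * retransmissions), optionally carrying timestamp and MD5 options.
 */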
static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
			    struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);
	__be32 *topt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_key tw_key;
#endif

#ifdef CONFIG_TCP_MD5SIG
	if (!tw && skb->sk) {
		key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
	} else if (tw && tw->tw_md5_keylen) {
		tw_key.key = tw->tw_md5_key;
		tw_key.keylen = tw->tw_md5_keylen;
		key = &tw_key;
	} else {
		key = NULL;
	}
#endif

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
					&ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr,
					t1, IPPROTO_TCP, tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}

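/*
 * Map a segment received on a listening socket to its real target: a
 * pending connection request, an already-established socket found in the
 * hash tables, or (by default) the listener itself.
 */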
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &ipv6_hdr(skb)->saddr,
					 th->source, &ipv6_hdr(skb)->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
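/*
 * Handle an incoming SYN: allocate a request_sock, record the peer's
 * addresses and options, pick the ISN and answer with a SYN|ACK.
 */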
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	TCP_ECN_create_request(req, tcp_hdr(skb));
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(skb);

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}

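/*
 * Create the child socket once the handshake completes.  A SYN that
 * arrived over IPv4 yields a v4-mapped child; otherwise the request's
 * addresses, options and route are transplanted onto the new sock.
 */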
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 code
		   has been working with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   also do one more thing here: reattach the optmem to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

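/*
 * Validate the checksum of an incoming segment.  Short packets (up to 76
 * bytes) are verified immediately; longer ones keep the folded partial
 * sum and are checked later, when the data is actually consumed.
 */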
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives, goes
	   to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* You ask, what is this?

	   1. skb was enqueued by tcp.
	   2. skb was added to the tail of the read queue, not out of order.
	   3. The socket is not in a passive state.
	   4. Finally, it really contains options, which the user wants
	      to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}

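/*
 * Main IPv6 receive entry: validate the header and checksum, look the
 * segment's socket up, run the XFRM policy check, then either process
 * the segment directly, prequeue it, or backlog it if the socket is
 * owned by the user.
 */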
static int tcp_v6_rcv(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source,
			    &ipv6_hdr(skb)->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 * Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_calc_md5_hash,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_calc_md5_hash,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

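/* Tear down: drop the MD5 keys, then run the common v4 and inet6 destructors. */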
static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest = &np->daddr;
	src = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.capability	= -1,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};

void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);

	if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCPv6 control socket.\n");
}