[IPV4]: Use the {DEFINE|REF}_PROTO_INUSE infrastructure
net/ipv6/tcp_ipv6.c (android_kernel_alcatel_ttab.git)
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on:
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
14 *
15 * Fixes:
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63
64 #include <asm/uaccess.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <linux/crypto.h>
70 #include <linux/scatterlist.h>
71
72 /* Socket used for sending RSTs and ACKs */
73 static struct socket *tcp6_socket;
74
75 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
77 static void tcp_v6_send_check(struct sock *sk, int len,
78 struct sk_buff *skb);
79
80 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
81
82 static struct inet_connection_sock_af_ops ipv6_mapped;
83 static struct inet_connection_sock_af_ops ipv6_specific;
84 #ifdef CONFIG_TCP_MD5SIG
85 static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
87 #endif
88
89 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
90 {
91 return inet_csk_get_port(&tcp_hashinfo, sk, snum,
92 inet6_csk_bind_conflict);
93 }
94
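/*
 * Hash an open socket into the TCP lookup tables.  A socket whose
 * icsk_af_ops point at ipv6_mapped is really carrying IPv4 traffic,
 * so it is handed to the IPv4 hash routine; native IPv6 sockets go
 * into the IPv6 established hash with bottom halves disabled.
 */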
95 static void tcp_v6_hash(struct sock *sk)
96 {
97 if (sk->sk_state != TCP_CLOSE) {
98 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
99 tcp_prot.hash(sk);
100 return;
101 }
102 local_bh_disable();
103 __inet6_hash(&tcp_hashinfo, sk);
104 local_bh_enable();
105 }
106 }
107
108 static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
109 struct in6_addr *saddr,
110 struct in6_addr *daddr,
111 __wsum base)
112 {
113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
114 }
115
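/*
 * Pick the initial sequence number for an incoming SYN from the
 * address/port 4-tuple of the segment.
 */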
116 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
117 {
118 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
120 tcp_hdr(skb)->dest,
121 tcp_hdr(skb)->source);
122 }
123
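/*
 * Active open.  Handles flow labels, falls back to tcp_v4_connect()
 * for v4-mapped destinations, performs the routing/xfrm lookup and
 * chooses the initial sequence number before sending the SYN.
 */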
124 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
125 int addr_len)
126 {
127 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
128 struct inet_sock *inet = inet_sk(sk);
129 struct inet_connection_sock *icsk = inet_csk(sk);
130 struct ipv6_pinfo *np = inet6_sk(sk);
131 struct tcp_sock *tp = tcp_sk(sk);
132 struct in6_addr *saddr = NULL, *final_p = NULL, final;
133 struct flowi fl;
134 struct dst_entry *dst;
135 int addr_type;
136 int err;
137
138 if (addr_len < SIN6_LEN_RFC2133)
139 return -EINVAL;
140
141 if (usin->sin6_family != AF_INET6)
142 return(-EAFNOSUPPORT);
143
144 memset(&fl, 0, sizeof(fl));
145
146 if (np->sndflow) {
147 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
148 IP6_ECN_flow_init(fl.fl6_flowlabel);
149 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
150 struct ip6_flowlabel *flowlabel;
151 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
152 if (flowlabel == NULL)
153 return -EINVAL;
154 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
155 fl6_sock_release(flowlabel);
156 }
157 }
158
159 /*
160 * connect() to INADDR_ANY means loopback (BSD'ism).
161 */
162
163 if(ipv6_addr_any(&usin->sin6_addr))
164 usin->sin6_addr.s6_addr[15] = 0x1;
165
166 addr_type = ipv6_addr_type(&usin->sin6_addr);
167
168 if(addr_type & IPV6_ADDR_MULTICAST)
169 return -ENETUNREACH;
170
171 if (addr_type&IPV6_ADDR_LINKLOCAL) {
172 if (addr_len >= sizeof(struct sockaddr_in6) &&
173 usin->sin6_scope_id) {
174 /* If interface is set while binding, indices
175 * must coincide.
176 */
177 if (sk->sk_bound_dev_if &&
178 sk->sk_bound_dev_if != usin->sin6_scope_id)
179 return -EINVAL;
180
181 sk->sk_bound_dev_if = usin->sin6_scope_id;
182 }
183
184 /* Connecting to a link-local address requires an interface */
185 if (!sk->sk_bound_dev_if)
186 return -EINVAL;
187 }
188
189 if (tp->rx_opt.ts_recent_stamp &&
190 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
191 tp->rx_opt.ts_recent = 0;
192 tp->rx_opt.ts_recent_stamp = 0;
193 tp->write_seq = 0;
194 }
195
196 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
197 np->flow_label = fl.fl6_flowlabel;
198
199 /*
200 * TCP over IPv4
201 */
202
203 if (addr_type == IPV6_ADDR_MAPPED) {
204 u32 exthdrlen = icsk->icsk_ext_hdr_len;
205 struct sockaddr_in sin;
206
207 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
208
209 if (__ipv6_only_sock(sk))
210 return -ENETUNREACH;
211
212 sin.sin_family = AF_INET;
213 sin.sin_port = usin->sin6_port;
214 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
215
216 icsk->icsk_af_ops = &ipv6_mapped;
217 sk->sk_backlog_rcv = tcp_v4_do_rcv;
218 #ifdef CONFIG_TCP_MD5SIG
219 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
220 #endif
221
222 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
223
224 if (err) {
225 icsk->icsk_ext_hdr_len = exthdrlen;
226 icsk->icsk_af_ops = &ipv6_specific;
227 sk->sk_backlog_rcv = tcp_v6_do_rcv;
228 #ifdef CONFIG_TCP_MD5SIG
229 tp->af_specific = &tcp_sock_ipv6_specific;
230 #endif
231 goto failure;
232 } else {
233 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
234 inet->saddr);
235 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
236 inet->rcv_saddr);
237 }
238
239 return err;
240 }
241
242 if (!ipv6_addr_any(&np->rcv_saddr))
243 saddr = &np->rcv_saddr;
244
245 fl.proto = IPPROTO_TCP;
246 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
247 ipv6_addr_copy(&fl.fl6_src,
248 (saddr ? saddr : &np->saddr));
249 fl.oif = sk->sk_bound_dev_if;
250 fl.fl_ip_dport = usin->sin6_port;
251 fl.fl_ip_sport = inet->sport;
252
253 if (np->opt && np->opt->srcrt) {
254 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
255 ipv6_addr_copy(&final, &fl.fl6_dst);
256 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
257 final_p = &final;
258 }
259
260 security_sk_classify_flow(sk, &fl);
261
262 err = ip6_dst_lookup(sk, &dst, &fl);
263 if (err)
264 goto failure;
265 if (final_p)
266 ipv6_addr_copy(&fl.fl6_dst, final_p);
267
268 if ((err = __xfrm_lookup(&dst, &fl, sk, 1)) < 0) {
269 if (err == -EREMOTE)
270 err = ip6_dst_blackhole(sk, &dst, &fl);
271 if (err < 0)
272 goto failure;
273 }
274
275 if (saddr == NULL) {
276 saddr = &fl.fl6_src;
277 ipv6_addr_copy(&np->rcv_saddr, saddr);
278 }
279
280 /* set the source address */
281 ipv6_addr_copy(&np->saddr, saddr);
282 inet->rcv_saddr = LOOPBACK4_IPV6;
283
284 sk->sk_gso_type = SKB_GSO_TCPV6;
285 __ip6_dst_store(sk, dst, NULL, NULL);
286
287 icsk->icsk_ext_hdr_len = 0;
288 if (np->opt)
289 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
290 np->opt->opt_nflen);
291
292 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
293
294 inet->dport = usin->sin6_port;
295
296 tcp_set_state(sk, TCP_SYN_SENT);
297 err = inet6_hash_connect(&tcp_death_row, sk);
298 if (err)
299 goto late_failure;
300
301 if (!tp->write_seq)
302 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
303 np->daddr.s6_addr32,
304 inet->sport,
305 inet->dport);
306
307 err = tcp_connect(sk);
308 if (err)
309 goto late_failure;
310
311 return 0;
312
313 late_failure:
314 tcp_set_state(sk, TCP_CLOSE);
315 __sk_dst_reset(sk);
316 failure:
317 inet->dport = 0;
318 sk->sk_route_caps = 0;
319 return err;
320 }
321
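/*
 * ICMPv6 error handler.  Looks up the socket the error refers to,
 * adjusts the path MTU on ICMPV6_PKT_TOOBIG, and otherwise converts
 * the ICMP error to an errno and reports it according to the socket
 * state.
 */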
322 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
323 int type, int code, int offset, __be32 info)
324 {
325 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
326 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
327 struct ipv6_pinfo *np;
328 struct sock *sk;
329 int err;
330 struct tcp_sock *tp;
331 __u32 seq;
332
333 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
334 th->source, skb->dev->ifindex);
335
336 if (sk == NULL) {
337 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
338 return;
339 }
340
341 if (sk->sk_state == TCP_TIME_WAIT) {
342 inet_twsk_put(inet_twsk(sk));
343 return;
344 }
345
346 bh_lock_sock(sk);
347 if (sock_owned_by_user(sk))
348 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
349
350 if (sk->sk_state == TCP_CLOSE)
351 goto out;
352
353 tp = tcp_sk(sk);
354 seq = ntohl(th->seq);
355 if (sk->sk_state != TCP_LISTEN &&
356 !between(seq, tp->snd_una, tp->snd_nxt)) {
357 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
358 goto out;
359 }
360
361 np = inet6_sk(sk);
362
363 if (type == ICMPV6_PKT_TOOBIG) {
364 struct dst_entry *dst = NULL;
365
366 if (sock_owned_by_user(sk))
367 goto out;
368 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
369 goto out;
370
371 /* icmp should have updated the destination cache entry */
372 dst = __sk_dst_check(sk, np->dst_cookie);
373
374 if (dst == NULL) {
375 struct inet_sock *inet = inet_sk(sk);
376 struct flowi fl;
377
378 /* BUGGG_FUTURE: Again, it is not clear how
379 to handle rthdr case. Ignore this complexity
380 for now.
381 */
382 memset(&fl, 0, sizeof(fl));
383 fl.proto = IPPROTO_TCP;
384 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
385 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
386 fl.oif = sk->sk_bound_dev_if;
387 fl.fl_ip_dport = inet->dport;
388 fl.fl_ip_sport = inet->sport;
389 security_skb_classify_flow(skb, &fl);
390
391 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
392 sk->sk_err_soft = -err;
393 goto out;
394 }
395
396 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
397 sk->sk_err_soft = -err;
398 goto out;
399 }
400
401 } else
402 dst_hold(dst);
403
404 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
405 tcp_sync_mss(sk, dst_mtu(dst));
406 tcp_simple_retransmit(sk);
407 } /* else let the usual retransmit timer handle it */
408 dst_release(dst);
409 goto out;
410 }
411
412 icmpv6_err_convert(type, code, &err);
413
414 /* Might be for a request_sock */
415 switch (sk->sk_state) {
416 struct request_sock *req, **prev;
417 case TCP_LISTEN:
418 if (sock_owned_by_user(sk))
419 goto out;
420
421 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
422 &hdr->saddr, inet6_iif(skb));
423 if (!req)
424 goto out;
425
426 /* ICMPs are not backlogged, hence we cannot get
427 * an established socket here.
428 */
429 BUG_TRAP(req->sk == NULL);
430
431 if (seq != tcp_rsk(req)->snt_isn) {
432 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
433 goto out;
434 }
435
436 inet_csk_reqsk_queue_drop(sk, req, prev);
437 goto out;
438
439 case TCP_SYN_SENT:
440 case TCP_SYN_RECV: /* Cannot happen.
441 It can, if SYNs are crossed. --ANK */
442 if (!sock_owned_by_user(sk)) {
443 sk->sk_err = err;
444 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
445
446 tcp_done(sk);
447 } else
448 sk->sk_err_soft = err;
449 goto out;
450 }
451
452 if (!sock_owned_by_user(sk) && np->recverr) {
453 sk->sk_err = err;
454 sk->sk_error_report(sk);
455 } else
456 sk->sk_err_soft = err;
457
458 out:
459 bh_unlock_sock(sk);
460 sock_put(sk);
461 }
462
463
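/*
 * Build and transmit a SYN|ACK for a pending connection request,
 * doing a route lookup when the caller did not supply a cached dst
 * and honouring any routing header in the listener's IPv6 options.
 */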
464 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
465 struct dst_entry *dst)
466 {
467 struct inet6_request_sock *treq = inet6_rsk(req);
468 struct ipv6_pinfo *np = inet6_sk(sk);
469 struct sk_buff * skb;
470 struct ipv6_txoptions *opt = NULL;
471 struct in6_addr * final_p = NULL, final;
472 struct flowi fl;
473 int err = -1;
474
475 memset(&fl, 0, sizeof(fl));
476 fl.proto = IPPROTO_TCP;
477 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
478 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
479 fl.fl6_flowlabel = 0;
480 fl.oif = treq->iif;
481 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
482 fl.fl_ip_sport = inet_sk(sk)->sport;
483 security_req_classify_flow(req, &fl);
484
485 if (dst == NULL) {
486 opt = np->opt;
487 if (opt && opt->srcrt) {
488 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
489 ipv6_addr_copy(&final, &fl.fl6_dst);
490 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
491 final_p = &final;
492 }
493
494 err = ip6_dst_lookup(sk, &dst, &fl);
495 if (err)
496 goto done;
497 if (final_p)
498 ipv6_addr_copy(&fl.fl6_dst, final_p);
499 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
500 goto done;
501 }
502
503 skb = tcp_make_synack(sk, dst, req);
504 if (skb) {
505 struct tcphdr *th = tcp_hdr(skb);
506
507 th->check = tcp_v6_check(th, skb->len,
508 &treq->loc_addr, &treq->rmt_addr,
509 csum_partial((char *)th, skb->len, skb->csum));
510
511 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
512 err = ip6_xmit(sk, skb, &fl, opt, 0);
513 err = net_xmit_eval(err);
514 }
515
516 done:
517 if (opt && opt != np->opt)
518 sock_kfree_s(sk, opt, opt->tot_len);
519 dst_release(dst);
520 return err;
521 }
522
523 static void tcp_v6_reqsk_destructor(struct request_sock *req)
524 {
525 if (inet6_rsk(req)->pktopts)
526 kfree_skb(inet6_rsk(req)->pktopts);
527 }
528
529 #ifdef CONFIG_TCP_MD5SIG
530 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
531 struct in6_addr *addr)
532 {
533 struct tcp_sock *tp = tcp_sk(sk);
534 int i;
535
536 BUG_ON(tp == NULL);
537
538 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
539 return NULL;
540
541 for (i = 0; i < tp->md5sig_info->entries6; i++) {
542 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
543 return &tp->md5sig_info->keys6[i].base;
544 }
545 return NULL;
546 }
547
548 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
549 struct sock *addr_sk)
550 {
551 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
552 }
553
554 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
555 struct request_sock *req)
556 {
557 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
558 }
559
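/*
 * Install (or update) an MD5 signature key for a peer address.  The
 * keys live in a flat array that is grown one slot at a time; on
 * allocation failure the caller's key buffer is freed here.
 */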
560 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
561 char *newkey, u8 newkeylen)
562 {
563 /* Add key to the list */
564 struct tcp_md5sig_key *key;
565 struct tcp_sock *tp = tcp_sk(sk);
566 struct tcp6_md5sig_key *keys;
567
568 key = tcp_v6_md5_do_lookup(sk, peer);
569 if (key) {
570 /* modify existing entry - just update that one */
571 kfree(key->key);
572 key->key = newkey;
573 key->keylen = newkeylen;
574 } else {
575 /* reallocate new list if current one is full. */
576 if (!tp->md5sig_info) {
577 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
578 if (!tp->md5sig_info) {
579 kfree(newkey);
580 return -ENOMEM;
581 }
582 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
583 }
584 tcp_alloc_md5sig_pool();
585 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
586 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
587 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
588
589 if (!keys) {
590 tcp_free_md5sig_pool();
591 kfree(newkey);
592 return -ENOMEM;
593 }
594
595 if (tp->md5sig_info->entries6)
596 memmove(keys, tp->md5sig_info->keys6,
597 (sizeof (tp->md5sig_info->keys6[0]) *
598 tp->md5sig_info->entries6));
599
600 kfree(tp->md5sig_info->keys6);
601 tp->md5sig_info->keys6 = keys;
602 tp->md5sig_info->alloced6++;
603 }
604
605 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
606 peer);
607 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
608 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
609
610 tp->md5sig_info->entries6++;
611 }
612 return 0;
613 }
614
615 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
616 u8 *newkey, __u8 newkeylen)
617 {
618 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
619 newkey, newkeylen);
620 }
621
622 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
623 {
624 struct tcp_sock *tp = tcp_sk(sk);
625 int i;
626
627 for (i = 0; i < tp->md5sig_info->entries6; i++) {
628 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
629 /* Free the key */
630 kfree(tp->md5sig_info->keys6[i].base.key);
631 tp->md5sig_info->entries6--;
632
633 if (tp->md5sig_info->entries6 == 0) {
634 kfree(tp->md5sig_info->keys6);
635 tp->md5sig_info->keys6 = NULL;
636 tp->md5sig_info->alloced6 = 0;
637
638 tcp_free_md5sig_pool();
639
640 return 0;
641 } else {
642 /* shrink the database */
643 if (tp->md5sig_info->entries6 != i)
644 memmove(&tp->md5sig_info->keys6[i],
645 &tp->md5sig_info->keys6[i+1],
646 (tp->md5sig_info->entries6 - i)
647 * sizeof (tp->md5sig_info->keys6[0]));
648 }
649 }
650 }
651 return -ENOENT;
652 }
653
654 static void tcp_v6_clear_md5_list (struct sock *sk)
655 {
656 struct tcp_sock *tp = tcp_sk(sk);
657 int i;
658
659 if (tp->md5sig_info->entries6) {
660 for (i = 0; i < tp->md5sig_info->entries6; i++)
661 kfree(tp->md5sig_info->keys6[i].base.key);
662 tp->md5sig_info->entries6 = 0;
663 tcp_free_md5sig_pool();
664 }
665
666 kfree(tp->md5sig_info->keys6);
667 tp->md5sig_info->keys6 = NULL;
668 tp->md5sig_info->alloced6 = 0;
669
670 if (tp->md5sig_info->entries4) {
671 for (i = 0; i < tp->md5sig_info->entries4; i++)
672 kfree(tp->md5sig_info->keys4[i].base.key);
673 tp->md5sig_info->entries4 = 0;
674 tcp_free_md5sig_pool();
675 }
676
677 kfree(tp->md5sig_info->keys4);
678 tp->md5sig_info->keys4 = NULL;
679 tp->md5sig_info->alloced4 = 0;
680 }
681
682 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
683 int optlen)
684 {
685 struct tcp_md5sig cmd;
686 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
687 u8 *newkey;
688
689 if (optlen < sizeof(cmd))
690 return -EINVAL;
691
692 if (copy_from_user(&cmd, optval, sizeof(cmd)))
693 return -EFAULT;
694
695 if (sin6->sin6_family != AF_INET6)
696 return -EINVAL;
697
698 if (!cmd.tcpm_keylen) {
699 if (!tcp_sk(sk)->md5sig_info)
700 return -ENOENT;
701 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
702 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
703 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
704 }
705
706 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
707 return -EINVAL;
708
709 if (!tcp_sk(sk)->md5sig_info) {
710 struct tcp_sock *tp = tcp_sk(sk);
711 struct tcp_md5sig_info *p;
712
713 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
714 if (!p)
715 return -ENOMEM;
716
717 tp->md5sig_info = p;
718 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
719 }
720
721 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
722 if (!newkey)
723 return -ENOMEM;
724 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
725 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
726 newkey, cmd.tcpm_keylen);
727 }
728 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
729 }
730
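/*
 * Compute the RFC 2385 MD5 signature over the IPv6 pseudo-header,
 * the TCP header (with its checksum temporarily zeroed), the segment
 * data and the shared key, using the per-CPU md5sig crypto pool.
 */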
731 static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
732 struct in6_addr *saddr,
733 struct in6_addr *daddr,
734 struct tcphdr *th, int protocol,
735 int tcplen)
736 {
737 struct scatterlist sg[4];
738 __u16 data_len;
739 int block = 0;
740 __sum16 cksum;
741 struct tcp_md5sig_pool *hp;
742 struct tcp6_pseudohdr *bp;
743 struct hash_desc *desc;
744 int err;
745 unsigned int nbytes = 0;
746
747 hp = tcp_get_md5sig_pool();
748 if (!hp) {
749 printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
750 goto clear_hash_noput;
751 }
752 bp = &hp->md5_blk.ip6;
753 desc = &hp->md5_desc;
754
755 /* 1. TCP pseudo-header (RFC2460) */
756 ipv6_addr_copy(&bp->saddr, saddr);
757 ipv6_addr_copy(&bp->daddr, daddr);
758 bp->len = htonl(tcplen);
759 bp->protocol = htonl(protocol);
760
761 sg_init_table(sg, 4);
762
763 sg_set_buf(&sg[block++], bp, sizeof(*bp));
764 nbytes += sizeof(*bp);
765
766 /* 2. TCP header, excluding options */
767 cksum = th->check;
768 th->check = 0;
769 sg_set_buf(&sg[block++], th, sizeof(*th));
770 nbytes += sizeof(*th);
771
772 /* 3. TCP segment data (if any) */
773 data_len = tcplen - (th->doff << 2);
774 if (data_len > 0) {
775 u8 *data = (u8 *)th + (th->doff << 2);
776 sg_set_buf(&sg[block++], data, data_len);
777 nbytes += data_len;
778 }
779
780 /* 4. shared key */
781 sg_set_buf(&sg[block++], key->key, key->keylen);
782 nbytes += key->keylen;
783
784 sg_mark_end(&sg[block - 1]);
785
786 /* Now store the hash into the packet */
787 err = crypto_hash_init(desc);
788 if (err) {
789 printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
790 goto clear_hash;
791 }
792 err = crypto_hash_update(desc, sg, nbytes);
793 if (err) {
794 printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
795 goto clear_hash;
796 }
797 err = crypto_hash_final(desc, md5_hash);
798 if (err) {
799 printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
800 goto clear_hash;
801 }
802
803 /* Reset header, and free up the crypto */
804 tcp_put_md5sig_pool();
805 th->check = cksum;
806 out:
807 return 0;
808 clear_hash:
809 tcp_put_md5sig_pool();
810 clear_hash_noput:
811 memset(md5_hash, 0, 16);
812 goto out;
813 }
814
815 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
816 struct sock *sk,
817 struct dst_entry *dst,
818 struct request_sock *req,
819 struct tcphdr *th, int protocol,
820 int tcplen)
821 {
822 struct in6_addr *saddr, *daddr;
823
824 if (sk) {
825 saddr = &inet6_sk(sk)->saddr;
826 daddr = &inet6_sk(sk)->daddr;
827 } else {
828 saddr = &inet6_rsk(req)->loc_addr;
829 daddr = &inet6_rsk(req)->rmt_addr;
830 }
831 return tcp_v6_do_calc_md5_hash(md5_hash, key,
832 saddr, daddr,
833 th, protocol, tcplen);
834 }
835
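/*
 * Verify the TCP MD5 signature option on an incoming segment.
 * Returns nonzero (drop) when a signature is present but not
 * expected, expected but missing, or present but wrong.
 */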
836 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
837 {
838 __u8 *hash_location = NULL;
839 struct tcp_md5sig_key *hash_expected;
840 struct ipv6hdr *ip6h = ipv6_hdr(skb);
841 struct tcphdr *th = tcp_hdr(skb);
842 int length = (th->doff << 2) - sizeof (*th);
843 int genhash;
844 u8 *ptr;
845 u8 newhash[16];
846
847 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
848
849 /* If the TCP option is too short, we can short cut */
850 if (length < TCPOLEN_MD5SIG)
851 return hash_expected ? 1 : 0;
852
853 /* parse options */
854 ptr = (u8*)(th + 1);
855 while (length > 0) {
856 int opcode = *ptr++;
857 int opsize;
858
859 switch(opcode) {
860 case TCPOPT_EOL:
861 goto done_opts;
862 case TCPOPT_NOP:
863 length--;
864 continue;
865 default:
866 opsize = *ptr++;
867 if (opsize < 2 || opsize > length)
868 goto done_opts;
869 if (opcode == TCPOPT_MD5SIG) {
870 hash_location = ptr;
871 goto done_opts;
872 }
873 }
874 ptr += opsize - 2;
875 length -= opsize;
876 }
877
878 done_opts:
879 /* do we have a hash as expected? */
880 if (!hash_expected) {
881 if (!hash_location)
882 return 0;
883 if (net_ratelimit()) {
884 printk(KERN_INFO "MD5 Hash NOT expected but found "
885 "(" NIP6_FMT ", %u)->"
886 "(" NIP6_FMT ", %u)\n",
887 NIP6(ip6h->saddr), ntohs(th->source),
888 NIP6(ip6h->daddr), ntohs(th->dest));
889 }
890 return 1;
891 }
892
893 if (!hash_location) {
894 if (net_ratelimit()) {
895 printk(KERN_INFO "MD5 Hash expected but NOT found "
896 "(" NIP6_FMT ", %u)->"
897 "(" NIP6_FMT ", %u)\n",
898 NIP6(ip6h->saddr), ntohs(th->source),
899 NIP6(ip6h->daddr), ntohs(th->dest));
900 }
901 return 1;
902 }
903
904 /* check the signature */
905 genhash = tcp_v6_do_calc_md5_hash(newhash,
906 hash_expected,
907 &ip6h->saddr, &ip6h->daddr,
908 th, sk->sk_protocol,
909 skb->len);
910 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
911 if (net_ratelimit()) {
912 printk(KERN_INFO "MD5 Hash %s for "
913 "(" NIP6_FMT ", %u)->"
914 "(" NIP6_FMT ", %u)\n",
915 genhash ? "failed" : "mismatch",
916 NIP6(ip6h->saddr), ntohs(th->source),
917 NIP6(ip6h->daddr), ntohs(th->dest));
918 }
919 return 1;
920 }
921 return 0;
922 }
923 #endif
924
925 static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
926 .family = AF_INET6,
927 .obj_size = sizeof(struct tcp6_request_sock),
928 .rtx_syn_ack = tcp_v6_send_synack,
929 .send_ack = tcp_v6_reqsk_send_ack,
930 .destructor = tcp_v6_reqsk_destructor,
931 .send_reset = tcp_v6_send_reset
932 };
933
934 #ifdef CONFIG_TCP_MD5SIG
935 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
936 .md5_lookup = tcp_v6_reqsk_md5_lookup,
937 };
938 #endif
939
940 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
941 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
942 .twsk_unique = tcp_twsk_unique,
943 .twsk_destructor= tcp_twsk_destructor,
944 };
945
946 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
947 {
948 struct ipv6_pinfo *np = inet6_sk(sk);
949 struct tcphdr *th = tcp_hdr(skb);
950
951 if (skb->ip_summed == CHECKSUM_PARTIAL) {
952 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
953 skb->csum_start = skb_transport_header(skb) - skb->head;
954 skb->csum_offset = offsetof(struct tcphdr, check);
955 } else {
956 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
957 csum_partial((char *)th, th->doff<<2,
958 skb->csum));
959 }
960 }
961
962 static int tcp_v6_gso_send_check(struct sk_buff *skb)
963 {
964 struct ipv6hdr *ipv6h;
965 struct tcphdr *th;
966
967 if (!pskb_may_pull(skb, sizeof(*th)))
968 return -EINVAL;
969
970 ipv6h = ipv6_hdr(skb);
971 th = tcp_hdr(skb);
972
973 th->check = 0;
974 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
975 IPPROTO_TCP, 0);
976 skb->csum_start = skb_transport_header(skb) - skb->head;
977 skb->csum_offset = offsetof(struct tcphdr, check);
978 skb->ip_summed = CHECKSUM_PARTIAL;
979 return 0;
980 }
981
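/*
 * Send a RST in response to a bad segment.  The reply is built from
 * the offending skb with addresses and ports swapped, optionally
 * carries an MD5 option, and is transmitted through the tcp6 control
 * socket since there may be no local socket to charge it to.
 */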
982 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
983 {
984 struct tcphdr *th = tcp_hdr(skb), *t1;
985 struct sk_buff *buff;
986 struct flowi fl;
987 int tot_len = sizeof(*th);
988 #ifdef CONFIG_TCP_MD5SIG
989 struct tcp_md5sig_key *key;
990 #endif
991
992 if (th->rst)
993 return;
994
995 if (!ipv6_unicast_destination(skb))
996 return;
997
998 #ifdef CONFIG_TCP_MD5SIG
999 if (sk)
1000 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1001 else
1002 key = NULL;
1003
1004 if (key)
1005 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1006 #endif
1007
1008 /*
1009 * We need to grab some memory, and put together an RST,
1010 * and then put it into the queue to be sent.
1011 */
1012
1013 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1014 GFP_ATOMIC);
1015 if (buff == NULL)
1016 return;
1017
1018 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1019
1020 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1021
1022 /* Swap the send and the receive. */
1023 memset(t1, 0, sizeof(*t1));
1024 t1->dest = th->source;
1025 t1->source = th->dest;
1026 t1->doff = tot_len / 4;
1027 t1->rst = 1;
1028
1029 if(th->ack) {
1030 t1->seq = th->ack_seq;
1031 } else {
1032 t1->ack = 1;
1033 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1034 + skb->len - (th->doff<<2));
1035 }
1036
1037 #ifdef CONFIG_TCP_MD5SIG
1038 if (key) {
1039 __be32 *opt = (__be32*)(t1 + 1);
1040 opt[0] = htonl((TCPOPT_NOP << 24) |
1041 (TCPOPT_NOP << 16) |
1042 (TCPOPT_MD5SIG << 8) |
1043 TCPOLEN_MD5SIG);
1044 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
1045 &ipv6_hdr(skb)->daddr,
1046 &ipv6_hdr(skb)->saddr,
1047 t1, IPPROTO_TCP, tot_len);
1048 }
1049 #endif
1050
1051 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
1052
1053 memset(&fl, 0, sizeof(fl));
1054 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1055 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1056
1057 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1058 sizeof(*t1), IPPROTO_TCP,
1059 buff->csum);
1060
1061 fl.proto = IPPROTO_TCP;
1062 fl.oif = inet6_iif(skb);
1063 fl.fl_ip_dport = t1->dest;
1064 fl.fl_ip_sport = t1->source;
1065 security_skb_classify_flow(skb, &fl);
1066
1067 /* sk = NULL, but it is safe for now. RST socket required. */
1068 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1069
1070 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1071 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1072 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1073 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1074 return;
1075 }
1076 }
1077
1078 kfree_skb(buff);
1079 }
1080
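/*
 * Send a bare ACK on behalf of a TIME_WAIT or request socket,
 * optionally carrying a timestamp echo and an MD5 signature option.
 */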
1081 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1082 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1083 {
1084 struct tcphdr *th = tcp_hdr(skb), *t1;
1085 struct sk_buff *buff;
1086 struct flowi fl;
1087 int tot_len = sizeof(struct tcphdr);
1088 __be32 *topt;
1089 #ifdef CONFIG_TCP_MD5SIG
1090 struct tcp_md5sig_key *key;
1091 struct tcp_md5sig_key tw_key;
1092 #endif
1093
1094 #ifdef CONFIG_TCP_MD5SIG
1095 if (!tw && skb->sk) {
1096 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1097 } else if (tw && tw->tw_md5_keylen) {
1098 tw_key.key = tw->tw_md5_key;
1099 tw_key.keylen = tw->tw_md5_keylen;
1100 key = &tw_key;
1101 } else {
1102 key = NULL;
1103 }
1104 #endif
1105
1106 if (ts)
1107 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1108 #ifdef CONFIG_TCP_MD5SIG
1109 if (key)
1110 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1111 #endif
1112
1113 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1114 GFP_ATOMIC);
1115 if (buff == NULL)
1116 return;
1117
1118 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1119
1120 t1 = (struct tcphdr *) skb_push(buff,tot_len);
1121
1122 /* Swap the send and the receive. */
1123 memset(t1, 0, sizeof(*t1));
1124 t1->dest = th->source;
1125 t1->source = th->dest;
1126 t1->doff = tot_len/4;
1127 t1->seq = htonl(seq);
1128 t1->ack_seq = htonl(ack);
1129 t1->ack = 1;
1130 t1->window = htons(win);
1131
1132 topt = (__be32 *)(t1 + 1);
1133
1134 if (ts) {
1135 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1136 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1137 *topt++ = htonl(tcp_time_stamp);
1138 *topt = htonl(ts);
1139 }
1140
1141 #ifdef CONFIG_TCP_MD5SIG
1142 if (key) {
1143 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1144 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1145 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1146 &ipv6_hdr(skb)->daddr,
1147 &ipv6_hdr(skb)->saddr,
1148 t1, IPPROTO_TCP, tot_len);
1149 }
1150 #endif
1151
1152 buff->csum = csum_partial((char *)t1, tot_len, 0);
1153
1154 memset(&fl, 0, sizeof(fl));
1155 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1156 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1157
1158 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1159 tot_len, IPPROTO_TCP,
1160 buff->csum);
1161
1162 fl.proto = IPPROTO_TCP;
1163 fl.oif = inet6_iif(skb);
1164 fl.fl_ip_dport = t1->dest;
1165 fl.fl_ip_sport = t1->source;
1166 security_skb_classify_flow(skb, &fl);
1167
1168 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1169 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1170 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1171 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1172 return;
1173 }
1174 }
1175
1176 kfree_skb(buff);
1177 }
1178
1179 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1180 {
1181 struct inet_timewait_sock *tw = inet_twsk(sk);
1182 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1183
1184 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1185 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1186 tcptw->tw_ts_recent);
1187
1188 inet_twsk_put(tw);
1189 }
1190
1191 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1192 {
1193 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1194 }
1195
1196
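/*
 * For a segment arriving on a listening socket, look for a matching
 * connection request first and then for an already established child;
 * fall back to the listener itself if neither is found.
 */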
1197 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1198 {
1199 struct request_sock *req, **prev;
1200 const struct tcphdr *th = tcp_hdr(skb);
1201 struct sock *nsk;
1202
1203 /* Find possible connection requests. */
1204 req = inet6_csk_search_req(sk, &prev, th->source,
1205 &ipv6_hdr(skb)->saddr,
1206 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1207 if (req)
1208 return tcp_check_req(sk, skb, req, prev);
1209
1210 nsk = __inet6_lookup_established(&tcp_hashinfo, &ipv6_hdr(skb)->saddr,
1211 th->source, &ipv6_hdr(skb)->daddr,
1212 ntohs(th->dest), inet6_iif(skb));
1213
1214 if (nsk) {
1215 if (nsk->sk_state != TCP_TIME_WAIT) {
1216 bh_lock_sock(nsk);
1217 return nsk;
1218 }
1219 inet_twsk_put(inet_twsk(nsk));
1220 return NULL;
1221 }
1222
1223 #if 0 /*def CONFIG_SYN_COOKIES*/
1224 if (!th->rst && !th->syn && th->ack)
1225 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
1226 #endif
1227 return sk;
1228 }
1229
1230 /* FIXME: this is substantially similar to the ipv4 code.
1231 * Can some kind of merge be done? -- erics
1232 */
1233 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1234 {
1235 struct inet6_request_sock *treq;
1236 struct ipv6_pinfo *np = inet6_sk(sk);
1237 struct tcp_options_received tmp_opt;
1238 struct tcp_sock *tp = tcp_sk(sk);
1239 struct request_sock *req = NULL;
1240 __u32 isn = TCP_SKB_CB(skb)->when;
1241
1242 if (skb->protocol == htons(ETH_P_IP))
1243 return tcp_v4_conn_request(sk, skb);
1244
1245 if (!ipv6_unicast_destination(skb))
1246 goto drop;
1247
1248 /*
1249 * There are no SYN attacks on IPv6, yet...
1250 */
1251 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1252 if (net_ratelimit())
1253 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
1254 goto drop;
1255 }
1256
1257 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1258 goto drop;
1259
1260 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1261 if (req == NULL)
1262 goto drop;
1263
1264 #ifdef CONFIG_TCP_MD5SIG
1265 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1266 #endif
1267
1268 tcp_clear_options(&tmp_opt);
1269 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1270 tmp_opt.user_mss = tp->rx_opt.user_mss;
1271
1272 tcp_parse_options(skb, &tmp_opt, 0);
1273
1274 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1275 tcp_openreq_init(req, &tmp_opt, skb);
1276
1277 treq = inet6_rsk(req);
1278 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1279 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1280 TCP_ECN_create_request(req, tcp_hdr(skb));
1281 treq->pktopts = NULL;
1282 if (ipv6_opt_accepted(sk, skb) ||
1283 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1284 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1285 atomic_inc(&skb->users);
1286 treq->pktopts = skb;
1287 }
1288 treq->iif = sk->sk_bound_dev_if;
1289
1290 /* So that link locals have meaning */
1291 if (!sk->sk_bound_dev_if &&
1292 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1293 treq->iif = inet6_iif(skb);
1294
1295 if (isn == 0)
1296 isn = tcp_v6_init_sequence(skb);
1297
1298 tcp_rsk(req)->snt_isn = isn;
1299
1300 security_inet_conn_request(sk, skb, req);
1301
1302 if (tcp_v6_send_synack(sk, req, NULL))
1303 goto drop;
1304
1305 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1306 return 0;
1307
1308 drop:
1309 if (req)
1310 reqsk_free(req);
1311
1312 return 0; /* don't send reset */
1313 }
1314
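/*
 * Create the child socket once the 3-way handshake completes.  A
 * v4-mapped request is handed to tcp_v4_syn_recv_sock() and the child
 * is then fitted with the ipv6_mapped operations; native IPv6 children
 * get their own route, options and (optionally) a copied MD5 key.
 */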
1315 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1316 struct request_sock *req,
1317 struct dst_entry *dst)
1318 {
1319 struct inet6_request_sock *treq = inet6_rsk(req);
1320 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1321 struct tcp6_sock *newtcp6sk;
1322 struct inet_sock *newinet;
1323 struct tcp_sock *newtp;
1324 struct sock *newsk;
1325 struct ipv6_txoptions *opt;
1326 #ifdef CONFIG_TCP_MD5SIG
1327 struct tcp_md5sig_key *key;
1328 #endif
1329
1330 if (skb->protocol == htons(ETH_P_IP)) {
1331 /*
1332 * v6 mapped
1333 */
1334
1335 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1336
1337 if (newsk == NULL)
1338 return NULL;
1339
1340 newtcp6sk = (struct tcp6_sock *)newsk;
1341 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1342
1343 newinet = inet_sk(newsk);
1344 newnp = inet6_sk(newsk);
1345 newtp = tcp_sk(newsk);
1346
1347 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1348
1349 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1350 newinet->daddr);
1351
1352 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1353 newinet->saddr);
1354
1355 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1356
1357 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1358 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1359 #ifdef CONFIG_TCP_MD5SIG
1360 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1361 #endif
1362
1363 newnp->pktoptions = NULL;
1364 newnp->opt = NULL;
1365 newnp->mcast_oif = inet6_iif(skb);
1366 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1367
1368 /*
1369 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1370 * here, tcp_create_openreq_child now does this for us, see the comment in
1371 * that function for the gory details. -acme
1372 */
1373
1374 /* This is a tricky place. Until this moment the IPv4 tcp
1375 code worked with IPv6 icsk.icsk_af_ops.
1376 Sync it now.
1377 */
1378 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1379
1380 return newsk;
1381 }
1382
1383 opt = np->opt;
1384
1385 if (sk_acceptq_is_full(sk))
1386 goto out_overflow;
1387
1388 if (dst == NULL) {
1389 struct in6_addr *final_p = NULL, final;
1390 struct flowi fl;
1391
1392 memset(&fl, 0, sizeof(fl));
1393 fl.proto = IPPROTO_TCP;
1394 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1395 if (opt && opt->srcrt) {
1396 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1397 ipv6_addr_copy(&final, &fl.fl6_dst);
1398 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1399 final_p = &final;
1400 }
1401 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1402 fl.oif = sk->sk_bound_dev_if;
1403 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1404 fl.fl_ip_sport = inet_sk(sk)->sport;
1405 security_req_classify_flow(req, &fl);
1406
1407 if (ip6_dst_lookup(sk, &dst, &fl))
1408 goto out;
1409
1410 if (final_p)
1411 ipv6_addr_copy(&fl.fl6_dst, final_p);
1412
1413 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1414 goto out;
1415 }
1416
1417 newsk = tcp_create_openreq_child(sk, req, skb);
1418 if (newsk == NULL)
1419 goto out;
1420
1421 /*
1422 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1423 * count here, tcp_create_openreq_child now does this for us, see the
1424 * comment in that function for the gory details. -acme
1425 */
1426
1427 newsk->sk_gso_type = SKB_GSO_TCPV6;
1428 __ip6_dst_store(newsk, dst, NULL, NULL);
1429
1430 newtcp6sk = (struct tcp6_sock *)newsk;
1431 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1432
1433 newtp = tcp_sk(newsk);
1434 newinet = inet_sk(newsk);
1435 newnp = inet6_sk(newsk);
1436
1437 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1438
1439 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1440 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1441 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1442 newsk->sk_bound_dev_if = treq->iif;
1443
1444 /* Now IPv6 options...
1445
1446 First: no IPv4 options.
1447 */
1448 newinet->opt = NULL;
1449 newnp->ipv6_fl_list = NULL;
1450
1451 /* Clone RX bits */
1452 newnp->rxopt.all = np->rxopt.all;
1453
1454 /* Clone pktoptions received with SYN */
1455 newnp->pktoptions = NULL;
1456 if (treq->pktopts != NULL) {
1457 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1458 kfree_skb(treq->pktopts);
1459 treq->pktopts = NULL;
1460 if (newnp->pktoptions)
1461 skb_set_owner_r(newnp->pktoptions, newsk);
1462 }
1463 newnp->opt = NULL;
1464 newnp->mcast_oif = inet6_iif(skb);
1465 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1466
1467 /* Clone native IPv6 options from listening socket (if any)
1468
1469 Yes, keeping reference count would be much more clever,
1470 but we do one more thing here: reattach optmem
1471 to newsk.
1472 */
1473 if (opt) {
1474 newnp->opt = ipv6_dup_options(newsk, opt);
1475 if (opt != np->opt)
1476 sock_kfree_s(sk, opt, opt->tot_len);
1477 }
1478
1479 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1480 if (newnp->opt)
1481 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1482 newnp->opt->opt_flen);
1483
1484 tcp_mtup_init(newsk);
1485 tcp_sync_mss(newsk, dst_mtu(dst));
1486 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1487 tcp_initialize_rcv_mss(newsk);
1488
1489 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1490
1491 #ifdef CONFIG_TCP_MD5SIG
1492 /* Copy over the MD5 key from the original socket */
1493 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1494 /* We're using one, so create a matching key
1495 * on the newsk structure. If we fail to get
1496 * memory, then we end up not copying the key
1497 * across. Shucks.
1498 */
1499 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1500 if (newkey != NULL)
1501 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1502 newkey, key->keylen);
1503 }
1504 #endif
1505
1506 __inet6_hash(&tcp_hashinfo, newsk);
1507 inet_inherit_port(&tcp_hashinfo, sk, newsk);
1508
1509 return newsk;
1510
1511 out_overflow:
1512 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1513 out:
1514 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1515 if (opt && opt != np->opt)
1516 sock_kfree_s(sk, opt, opt->tot_len);
1517 dst_release(dst);
1518 return NULL;
1519 }
1520
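/*
 * Validate the TCP checksum on receive.  Hardware CHECKSUM_COMPLETE
 * values are verified against the pseudo-header here; short packets
 * (<= 76 bytes) are checksummed immediately, larger ones are left
 * for later completion.
 */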
1521 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1522 {
1523 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1524 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
1525 &ipv6_hdr(skb)->daddr, skb->csum)) {
1526 skb->ip_summed = CHECKSUM_UNNECESSARY;
1527 return 0;
1528 }
1529 }
1530
1531 skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
1532 &ipv6_hdr(skb)->saddr,
1533 &ipv6_hdr(skb)->daddr, 0));
1534
1535 if (skb->len <= 76) {
1536 return __skb_checksum_complete(skb);
1537 }
1538 return 0;
1539 }
1540
1541 /* The socket must have its spinlock held when we get
1542 * here.
1543 *
1544 * We have a potential double-lock case here, so even when
1545 * doing backlog processing we use the BH locking scheme.
1546 * This is because we cannot sleep with the original spinlock
1547 * held.
1548 */
1549 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1550 {
1551 struct ipv6_pinfo *np = inet6_sk(sk);
1552 struct tcp_sock *tp;
1553 struct sk_buff *opt_skb = NULL;
1554
1555 /* Imagine: socket is IPv6. IPv4 packet arrives,
1556 goes to IPv4 receive handler and backlogged.
1557 From backlog it always goes here. Kerboom...
1558 Fortunately, tcp_rcv_established and rcv_established
1559 handle them correctly, but it is not the case with
1560 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1561 */
1562
1563 if (skb->protocol == htons(ETH_P_IP))
1564 return tcp_v4_do_rcv(sk, skb);
1565
1566 #ifdef CONFIG_TCP_MD5SIG
1567 if (tcp_v6_inbound_md5_hash (sk, skb))
1568 goto discard;
1569 #endif
1570
1571 if (sk_filter(sk, skb))
1572 goto discard;
1573
1574 /*
1575 * socket locking is here for SMP purposes as backlog rcv
1576 * is currently called with bh processing disabled.
1577 */
1578
1579 /* Do Stevens' IPV6_PKTOPTIONS.
1580
1581 Yes, guys, it is the only place in our code where we
1582 may do this without affecting IPv4.
1583 The rest of the code is protocol independent,
1584 and I do not like the idea of uglifying IPv4.
1585
1586 Actually, the whole idea behind IPV6_PKTOPTIONS
1587 does not look very well thought out. For now we latch
1588 the options received in the last packet enqueued
1589 by tcp. Feel free to propose a better solution.
1590 --ANK (980728)
1591 */
1592 if (np->rxopt.all)
1593 opt_skb = skb_clone(skb, GFP_ATOMIC);
1594
1595 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1596 TCP_CHECK_TIMER(sk);
1597 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1598 goto reset;
1599 TCP_CHECK_TIMER(sk);
1600 if (opt_skb)
1601 goto ipv6_pktoptions;
1602 return 0;
1603 }
1604
1605 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1606 goto csum_err;
1607
1608 if (sk->sk_state == TCP_LISTEN) {
1609 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1610 if (!nsk)
1611 goto discard;
1612
1613 /*
1614 * Queue it on the new socket if the new socket is active,
1615 * otherwise we just short-circuit this and continue with
1616 * the new socket.
1617 */
1618 if(nsk != sk) {
1619 if (tcp_child_process(sk, nsk, skb))
1620 goto reset;
1621 if (opt_skb)
1622 __kfree_skb(opt_skb);
1623 return 0;
1624 }
1625 }
1626
1627 TCP_CHECK_TIMER(sk);
1628 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1629 goto reset;
1630 TCP_CHECK_TIMER(sk);
1631 if (opt_skb)
1632 goto ipv6_pktoptions;
1633 return 0;
1634
1635 reset:
1636 tcp_v6_send_reset(sk, skb);
1637 discard:
1638 if (opt_skb)
1639 __kfree_skb(opt_skb);
1640 kfree_skb(skb);
1641 return 0;
1642 csum_err:
1643 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1644 goto discard;
1645
1646
1647 ipv6_pktoptions:
1648 /* What is this for?
1649
1650 1. skb was enqueued by tcp.
1651 2. skb is added to the tail of the read queue, rather than out of order.
1652 3. socket is not in a passive state.
1653 4. Finally, it really contains options which the user wants to receive.
1654 */
1655 tp = tcp_sk(sk);
1656 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1657 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1658 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1659 np->mcast_oif = inet6_iif(opt_skb);
1660 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1661 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1662 if (ipv6_opt_accepted(sk, opt_skb)) {
1663 skb_set_owner_r(opt_skb, sk);
1664 opt_skb = xchg(&np->pktoptions, opt_skb);
1665 } else {
1666 __kfree_skb(opt_skb);
1667 opt_skb = xchg(&np->pktoptions, NULL);
1668 }
1669 }
1670
1671 if (opt_skb)
1672 kfree_skb(opt_skb);
1673 return 0;
1674 }
1675
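/*
 * Main IPv6 TCP receive routine: pull and validate the header and
 * checksum, look the socket up in the established/listening hashes,
 * then deliver via the prequeue, directly, or onto the backlog
 * depending on who owns the socket.  TIME_WAIT sockets are handled
 * separately at the bottom.
 */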
1676 static int tcp_v6_rcv(struct sk_buff *skb)
1677 {
1678 struct tcphdr *th;
1679 struct sock *sk;
1680 int ret;
1681
1682 if (skb->pkt_type != PACKET_HOST)
1683 goto discard_it;
1684
1685 /*
1686 * Count it even if it's bad.
1687 */
1688 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1689
1690 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1691 goto discard_it;
1692
1693 th = tcp_hdr(skb);
1694
1695 if (th->doff < sizeof(struct tcphdr)/4)
1696 goto bad_packet;
1697 if (!pskb_may_pull(skb, th->doff*4))
1698 goto discard_it;
1699
1700 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1701 goto bad_packet;
1702
1703 th = tcp_hdr(skb);
1704 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1705 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1706 skb->len - th->doff*4);
1707 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1708 TCP_SKB_CB(skb)->when = 0;
1709 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1710 TCP_SKB_CB(skb)->sacked = 0;
1711
1712 sk = __inet6_lookup(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source,
1713 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1714 inet6_iif(skb));
1715
1716 if (!sk)
1717 goto no_tcp_socket;
1718
1719 process:
1720 if (sk->sk_state == TCP_TIME_WAIT)
1721 goto do_time_wait;
1722
1723 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1724 goto discard_and_relse;
1725
1726 if (sk_filter(sk, skb))
1727 goto discard_and_relse;
1728
1729 skb->dev = NULL;
1730
1731 bh_lock_sock_nested(sk);
1732 ret = 0;
1733 if (!sock_owned_by_user(sk)) {
1734 #ifdef CONFIG_NET_DMA
1735 struct tcp_sock *tp = tcp_sk(sk);
1736 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1737 tp->ucopy.dma_chan = get_softnet_dma();
1738 if (tp->ucopy.dma_chan)
1739 ret = tcp_v6_do_rcv(sk, skb);
1740 else
1741 #endif
1742 {
1743 if (!tcp_prequeue(sk, skb))
1744 ret = tcp_v6_do_rcv(sk, skb);
1745 }
1746 } else
1747 sk_add_backlog(sk, skb);
1748 bh_unlock_sock(sk);
1749
1750 sock_put(sk);
1751 return ret ? -1 : 0;
1752
1753 no_tcp_socket:
1754 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1755 goto discard_it;
1756
1757 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1758 bad_packet:
1759 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1760 } else {
1761 tcp_v6_send_reset(NULL, skb);
1762 }
1763
1764 discard_it:
1765
1766 /*
1767 * Discard frame
1768 */
1769
1770 kfree_skb(skb);
1771 return 0;
1772
1773 discard_and_relse:
1774 sock_put(sk);
1775 goto discard_it;
1776
1777 do_time_wait:
1778 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1779 inet_twsk_put(inet_twsk(sk));
1780 goto discard_it;
1781 }
1782
1783 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1784 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1785 inet_twsk_put(inet_twsk(sk));
1786 goto discard_it;
1787 }
1788
1789 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1790 case TCP_TW_SYN:
1791 {
1792 struct sock *sk2;
1793
1794 sk2 = inet6_lookup_listener(&tcp_hashinfo,
1795 &ipv6_hdr(skb)->daddr,
1796 ntohs(th->dest), inet6_iif(skb));
1797 if (sk2 != NULL) {
1798 struct inet_timewait_sock *tw = inet_twsk(sk);
1799 inet_twsk_deschedule(tw, &tcp_death_row);
1800 inet_twsk_put(tw);
1801 sk = sk2;
1802 goto process;
1803 }
1804 /* Fall through to ACK */
1805 }
1806 case TCP_TW_ACK:
1807 tcp_v6_timewait_ack(sk, skb);
1808 break;
1809 case TCP_TW_RST:
1810 goto no_tcp_socket;
1811 case TCP_TW_SUCCESS:;
1812 }
1813 goto discard_it;
1814 }
1815
1816 static int tcp_v6_remember_stamp(struct sock *sk)
1817 {
1818 /* Alas, not yet... */
1819 return 0;
1820 }
1821
1822 static struct inet_connection_sock_af_ops ipv6_specific = {
1823 .queue_xmit = inet6_csk_xmit,
1824 .send_check = tcp_v6_send_check,
1825 .rebuild_header = inet6_sk_rebuild_header,
1826 .conn_request = tcp_v6_conn_request,
1827 .syn_recv_sock = tcp_v6_syn_recv_sock,
1828 .remember_stamp = tcp_v6_remember_stamp,
1829 .net_header_len = sizeof(struct ipv6hdr),
1830 .setsockopt = ipv6_setsockopt,
1831 .getsockopt = ipv6_getsockopt,
1832 .addr2sockaddr = inet6_csk_addr2sockaddr,
1833 .sockaddr_len = sizeof(struct sockaddr_in6),
1834 #ifdef CONFIG_COMPAT
1835 .compat_setsockopt = compat_ipv6_setsockopt,
1836 .compat_getsockopt = compat_ipv6_getsockopt,
1837 #endif
1838 };
1839
1840 #ifdef CONFIG_TCP_MD5SIG
1841 static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1842 .md5_lookup = tcp_v6_md5_lookup,
1843 .calc_md5_hash = tcp_v6_calc_md5_hash,
1844 .md5_add = tcp_v6_md5_add_func,
1845 .md5_parse = tcp_v6_parse_md5_keys,
1846 };
1847 #endif
1848
1849 /*
1850 * TCP over IPv4 via INET6 API
1851 */
1852
1853 static struct inet_connection_sock_af_ops ipv6_mapped = {
1854 .queue_xmit = ip_queue_xmit,
1855 .send_check = tcp_v4_send_check,
1856 .rebuild_header = inet_sk_rebuild_header,
1857 .conn_request = tcp_v6_conn_request,
1858 .syn_recv_sock = tcp_v6_syn_recv_sock,
1859 .remember_stamp = tcp_v4_remember_stamp,
1860 .net_header_len = sizeof(struct iphdr),
1861 .setsockopt = ipv6_setsockopt,
1862 .getsockopt = ipv6_getsockopt,
1863 .addr2sockaddr = inet6_csk_addr2sockaddr,
1864 .sockaddr_len = sizeof(struct sockaddr_in6),
1865 #ifdef CONFIG_COMPAT
1866 .compat_setsockopt = compat_ipv6_setsockopt,
1867 .compat_getsockopt = compat_ipv6_getsockopt,
1868 #endif
1869 };
1870
1871 #ifdef CONFIG_TCP_MD5SIG
1872 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1873 .md5_lookup = tcp_v4_md5_lookup,
1874 .calc_md5_hash = tcp_v4_calc_md5_hash,
1875 .md5_add = tcp_v6_md5_add_func,
1876 .md5_parse = tcp_v6_parse_md5_keys,
1877 };
1878 #endif
1879
1880 /* NOTE: A lot of things set to zero explicitly by call to
1881 * sk_alloc() so need not be done here.
1882 */
1883 static int tcp_v6_init_sock(struct sock *sk)
1884 {
1885 struct inet_connection_sock *icsk = inet_csk(sk);
1886 struct tcp_sock *tp = tcp_sk(sk);
1887
1888 skb_queue_head_init(&tp->out_of_order_queue);
1889 tcp_init_xmit_timers(sk);
1890 tcp_prequeue_init(tp);
1891
1892 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1893 tp->mdev = TCP_TIMEOUT_INIT;
1894
1895 /* So many TCP implementations out there (incorrectly) count the
1896 * initial SYN frame in their delayed-ACK and congestion control
1897 * algorithms that we must have the following bandaid to talk
1898 * efficiently to them. -DaveM
1899 */
1900 tp->snd_cwnd = 2;
1901
1902 /* See draft-stevens-tcpca-spec-01 for discussion of the
1903 * initialization of these values.
1904 */
1905 tp->snd_ssthresh = 0x7fffffff;
1906 tp->snd_cwnd_clamp = ~0;
1907 tp->mss_cache = 536;
1908
1909 tp->reordering = sysctl_tcp_reordering;
1910
1911 sk->sk_state = TCP_CLOSE;
1912
1913 icsk->icsk_af_ops = &ipv6_specific;
1914 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1915 icsk->icsk_sync_mss = tcp_sync_mss;
1916 sk->sk_write_space = sk_stream_write_space;
1917 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1918
1919 #ifdef CONFIG_TCP_MD5SIG
1920 tp->af_specific = &tcp_sock_ipv6_specific;
1921 #endif
1922
1923 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1924 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1925
1926 atomic_inc(&tcp_sockets_allocated);
1927
1928 return 0;
1929 }
1930
1931 static int tcp_v6_destroy_sock(struct sock *sk)
1932 {
1933 #ifdef CONFIG_TCP_MD5SIG
1934 /* Clean up the MD5 key list */
1935 if (tcp_sk(sk)->md5sig_info)
1936 tcp_v6_clear_md5_list(sk);
1937 #endif
1938 tcp_v4_destroy_sock(sk);
1939 return inet6_destroy_sock(sk);
1940 }
1941
1942 #ifdef CONFIG_PROC_FS
1943 /* Proc filesystem TCPv6 sock list dumping. */
1944 static void get_openreq6(struct seq_file *seq,
1945 struct sock *sk, struct request_sock *req, int i, int uid)
1946 {
1947 int ttd = req->expires - jiffies;
1948 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1949 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1950
1951 if (ttd < 0)
1952 ttd = 0;
1953
1954 seq_printf(seq,
1955 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1956 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1957 i,
1958 src->s6_addr32[0], src->s6_addr32[1],
1959 src->s6_addr32[2], src->s6_addr32[3],
1960 ntohs(inet_sk(sk)->sport),
1961 dest->s6_addr32[0], dest->s6_addr32[1],
1962 dest->s6_addr32[2], dest->s6_addr32[3],
1963 ntohs(inet_rsk(req)->rmt_port),
1964 TCP_SYN_RECV,
1965 0,0, /* could print option size, but that is af dependent. */
1966 1, /* timers active (only the expire timer) */
1967 jiffies_to_clock_t(ttd),
1968 req->retrans,
1969 uid,
1970 0, /* non standard timer */
1971 0, /* open_requests have no inode */
1972 0, req);
1973 }
1974
1975 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1976 {
1977 struct in6_addr *dest, *src;
1978 __u16 destp, srcp;
1979 int timer_active;
1980 unsigned long timer_expires;
1981 struct inet_sock *inet = inet_sk(sp);
1982 struct tcp_sock *tp = tcp_sk(sp);
1983 const struct inet_connection_sock *icsk = inet_csk(sp);
1984 struct ipv6_pinfo *np = inet6_sk(sp);
1985
1986 dest = &np->daddr;
1987 src = &np->rcv_saddr;
1988 destp = ntohs(inet->dport);
1989 srcp = ntohs(inet->sport);
1990
1991 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1992 timer_active = 1;
1993 timer_expires = icsk->icsk_timeout;
1994 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1995 timer_active = 4;
1996 timer_expires = icsk->icsk_timeout;
1997 } else if (timer_pending(&sp->sk_timer)) {
1998 timer_active = 2;
1999 timer_expires = sp->sk_timer.expires;
2000 } else {
2001 timer_active = 0;
2002 timer_expires = jiffies;
2003 }
2004
2005 seq_printf(seq,
2006 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2007 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2008 i,
2009 src->s6_addr32[0], src->s6_addr32[1],
2010 src->s6_addr32[2], src->s6_addr32[3], srcp,
2011 dest->s6_addr32[0], dest->s6_addr32[1],
2012 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2013 sp->sk_state,
2014 tp->write_seq-tp->snd_una,
2015 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2016 timer_active,
2017 jiffies_to_clock_t(timer_expires - jiffies),
2018 icsk->icsk_retransmits,
2019 sock_i_uid(sp),
2020 icsk->icsk_probes_out,
2021 sock_i_ino(sp),
2022 atomic_read(&sp->sk_refcnt), sp,
2023 icsk->icsk_rto,
2024 icsk->icsk_ack.ato,
2025 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2026 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
2027 );
2028 }
2029
2030 static void get_timewait6_sock(struct seq_file *seq,
2031 struct inet_timewait_sock *tw, int i)
2032 {
2033 struct in6_addr *dest, *src;
2034 __u16 destp, srcp;
2035 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2036 int ttd = tw->tw_ttd - jiffies;
2037
2038 if (ttd < 0)
2039 ttd = 0;
2040
2041 dest = &tw6->tw_v6_daddr;
2042 src = &tw6->tw_v6_rcv_saddr;
2043 destp = ntohs(tw->tw_dport);
2044 srcp = ntohs(tw->tw_sport);
2045
2046 seq_printf(seq,
2047 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2048 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2049 i,
2050 src->s6_addr32[0], src->s6_addr32[1],
2051 src->s6_addr32[2], src->s6_addr32[3], srcp,
2052 dest->s6_addr32[0], dest->s6_addr32[1],
2053 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2054 tw->tw_substate, 0, 0,
2055 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2056 atomic_read(&tw->tw_refcnt), tw);
2057 }
2058
2059 static int tcp6_seq_show(struct seq_file *seq, void *v)
2060 {
2061 struct tcp_iter_state *st;
2062
2063 if (v == SEQ_START_TOKEN) {
2064 seq_puts(seq,
2065 " sl "
2066 "local_address "
2067 "remote_address "
2068 "st tx_queue rx_queue tr tm->when retrnsmt"
2069 " uid timeout inode\n");
2070 goto out;
2071 }
2072 st = seq->private;
2073
2074 switch (st->state) {
2075 case TCP_SEQ_STATE_LISTENING:
2076 case TCP_SEQ_STATE_ESTABLISHED:
2077 get_tcp6_sock(seq, v, st->num);
2078 break;
2079 case TCP_SEQ_STATE_OPENREQ:
2080 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2081 break;
2082 case TCP_SEQ_STATE_TIME_WAIT:
2083 get_timewait6_sock(seq, v, st->num);
2084 break;
2085 }
2086 out:
2087 return 0;
2088 }
2089
2090 static struct file_operations tcp6_seq_fops;
2091 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2092 .owner = THIS_MODULE,
2093 .name = "tcp6",
2094 .family = AF_INET6,
2095 .seq_show = tcp6_seq_show,
2096 .seq_fops = &tcp6_seq_fops,
2097 };
2098
2099 int __init tcp6_proc_init(void)
2100 {
2101 return tcp_proc_register(&tcp6_seq_afinfo);
2102 }
2103
2104 void tcp6_proc_exit(void)
2105 {
2106 tcp_proc_unregister(&tcp6_seq_afinfo);
2107 }
2108 #endif
2109
2110 struct proto tcpv6_prot = {
2111 .name = "TCPv6",
2112 .owner = THIS_MODULE,
2113 .close = tcp_close,
2114 .connect = tcp_v6_connect,
2115 .disconnect = tcp_disconnect,
2116 .accept = inet_csk_accept,
2117 .ioctl = tcp_ioctl,
2118 .init = tcp_v6_init_sock,
2119 .destroy = tcp_v6_destroy_sock,
2120 .shutdown = tcp_shutdown,
2121 .setsockopt = tcp_setsockopt,
2122 .getsockopt = tcp_getsockopt,
2123 .recvmsg = tcp_recvmsg,
2124 .backlog_rcv = tcp_v6_do_rcv,
2125 .hash = tcp_v6_hash,
2126 .unhash = tcp_unhash,
2127 .get_port = tcp_v6_get_port,
2128 .enter_memory_pressure = tcp_enter_memory_pressure,
2129 .sockets_allocated = &tcp_sockets_allocated,
2130 .memory_allocated = &tcp_memory_allocated,
2131 .memory_pressure = &tcp_memory_pressure,
2132 .orphan_count = &tcp_orphan_count,
2133 .sysctl_mem = sysctl_tcp_mem,
2134 .sysctl_wmem = sysctl_tcp_wmem,
2135 .sysctl_rmem = sysctl_tcp_rmem,
2136 .max_header = MAX_TCP_HEADER,
2137 .obj_size = sizeof(struct tcp6_sock),
2138 .twsk_prot = &tcp6_timewait_sock_ops,
2139 .rsk_prot = &tcp6_request_sock_ops,
2140 #ifdef CONFIG_COMPAT
2141 .compat_setsockopt = compat_tcp_setsockopt,
2142 .compat_getsockopt = compat_tcp_getsockopt,
2143 #endif
2144 };
2145
2146 static struct inet6_protocol tcpv6_protocol = {
2147 .handler = tcp_v6_rcv,
2148 .err_handler = tcp_v6_err,
2149 .gso_send_check = tcp_v6_gso_send_check,
2150 .gso_segment = tcp_tso_segment,
2151 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2152 };
2153
2154 static struct inet_protosw tcpv6_protosw = {
2155 .type = SOCK_STREAM,
2156 .protocol = IPPROTO_TCP,
2157 .prot = &tcpv6_prot,
2158 .ops = &inet6_stream_ops,
2159 .capability = -1,
2160 .no_check = 0,
2161 .flags = INET_PROTOSW_PERMANENT |
2162 INET_PROTOSW_ICSK,
2163 };
2164
2165 void __init tcpv6_init(void)
2166 {
2167 /* register inet6 protocol */
2168 if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
2169 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
2170 inet6_register_protosw(&tcpv6_protosw);
2171
2172 if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
2173 IPPROTO_TCP) < 0)
2174 panic("Failed to create the TCPv6 control socket.\n");
2175 }