net: Orphan and de-dst skbs earlier in xmit path.
net/ipv6/tcp_ipv6.c
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    struct in6_addr *saddr,
				    struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

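/*
 * Active open: resolve the route for an IPv6 (or v4-mapped) destination,
 * pick the source address and flow label, fall back to tcp_v4_connect()
 * for v4-mapped peers, then hash the socket and send the SYN through
 * tcp_connect().
 */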
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->inet_sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

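/*
 * ICMPv6 error handler: map the error onto the matching socket (or pending
 * request_sock), handle PKT_TOOBIG by re-syncing the path MTU, and report
 * other errors to the socket owner according to its state.
 */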
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.mark = sk->sk_mark;
			fl.fl_ip_dport = inet->inet_dport;
			fl.fl_ip_sport = inet->inet_sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


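/*
 * Build and transmit a SYN+ACK for a pending connection request, routing it
 * via the request's flow (including any IPv6 routing header in np->opt).
 */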
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp);
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
	printk(KERN_INFO
	       "TCPv6: Possible SYN flooding on port %d. "
	       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

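/*
 * TCP MD5 signature (RFC 2385) support: per-destination key lookup, add,
 * delete and hashing helpers used to sign and verify TCPv6 segments.
 */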
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

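/*
 * Checksum helpers: fill in the TCP checksum (or set up hardware checksum
 * offload) using the IPv6 pseudo-header, plus the GSO/GRO hooks for TCPv6.
 */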
static void __tcp_v6_send_check(struct sk_buff *skb,
				struct in6_addr *saddr, struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

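/*
 * Build a bare TCP reply (RST or ACK, optionally carrying timestamp and MD5
 * options) on the per-namespace control socket and transmit it back along
 * the reversed flow of the packet that triggered it.
 */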
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup even when it is for a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
			skb_dst_set(buff, dst);
			ip6_xmit(ctl_sk, buff, &fl, NULL);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

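/*
 * Handle an incoming SYN: allocate a request_sock, parse the TCP options
 * (falling back to SYN cookies when the request queue overflows), record
 * the peer's addresses and interface, and answer with a SYN+ACK.
 */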
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

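/*
 * Create the child socket once the three-way handshake completes: handle
 * the v4-mapped case via tcp_v4_syn_recv_sock(), otherwise route to the
 * peer, copy IPv6 options and (optionally) the MD5 key, and hash the new
 * socket into the established table.
 */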
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.mark = sk->sk_mark;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

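/*
 * Verify or set up the TCP checksum of an incoming segment: short packets
 * (up to 76 bytes) are checksummed immediately, longer ones lazily.
 */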
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

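/*
 * Main receive entry point registered with the IPv6 stack: validate the
 * header and checksum, look up the owning socket, and hand the segment to
 * tcp_v6_do_rcv() directly, via the prequeue, or via the backlog.
 */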
e5bbef20 1678static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1679{
1ab1457c 1680 struct tcphdr *th;
1da177e4
LT
1681 struct sock *sk;
1682 int ret;
a86b1e30 1683 struct net *net = dev_net(skb->dev);
1da177e4
LT
1684
1685 if (skb->pkt_type != PACKET_HOST)
1686 goto discard_it;
1687
1688 /*
1689 * Count it even if it's bad.
1690 */
63231bdd 1691 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1692
1693 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1694 goto discard_it;
1695
aa8223c7 1696 th = tcp_hdr(skb);
1da177e4
LT
1697
1698 if (th->doff < sizeof(struct tcphdr)/4)
1699 goto bad_packet;
1700 if (!pskb_may_pull(skb, th->doff*4))
1701 goto discard_it;
1702
60476372 1703 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1704 goto bad_packet;
1705
aa8223c7 1706 th = tcp_hdr(skb);
1da177e4
LT
1707 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1708 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1709 skb->len - th->doff*4);
1710 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1711 TCP_SKB_CB(skb)->when = 0;
0660e03f 1712 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1da177e4
LT
1713 TCP_SKB_CB(skb)->sacked = 0;
1714
9a1f27c4 1715 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1716 if (!sk)
1717 goto no_tcp_socket;
1718
1719process:
1720 if (sk->sk_state == TCP_TIME_WAIT)
1721 goto do_time_wait;
1722
1723 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1724 goto discard_and_relse;
1725
fda9ef5d 1726 if (sk_filter(sk, skb))
1da177e4
LT
1727 goto discard_and_relse;
1728
1729 skb->dev = NULL;
1730
293b9c42 1731 bh_lock_sock_nested(sk);
1da177e4
LT
1732 ret = 0;
1733 if (!sock_owned_by_user(sk)) {
1a2449a8 1734#ifdef CONFIG_NET_DMA
1ab1457c 1735 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a 1736 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1737 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1ab1457c
YH
1738 if (tp->ucopy.dma_chan)
1739 ret = tcp_v6_do_rcv(sk, skb);
1740 else
1a2449a8
CL
1741#endif
1742 {
1743 if (!tcp_prequeue(sk, skb))
1744 ret = tcp_v6_do_rcv(sk, skb);
1745 }
6cce09f8 1746 } else if (unlikely(sk_add_backlog(sk, skb))) {
6b03a53a 1747 bh_unlock_sock(sk);
6cce09f8 1748 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1749 goto discard_and_relse;
1750 }
1da177e4
LT
1751 bh_unlock_sock(sk);
1752
1753 sock_put(sk);
1754 return ret ? -1 : 0;
1755
1756no_tcp_socket:
1757 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1758 goto discard_it;
1759
1760 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1761bad_packet:
63231bdd 1762 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1763 } else {
cfb6eeb4 1764 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1765 }
1766
1767discard_it:
1768
1769 /*
1770 * Discard frame
1771 */
1772
1773 kfree_skb(skb);
1774 return 0;
1775
1776discard_and_relse:
1777 sock_put(sk);
1778 goto discard_it;
1779
1780do_time_wait:
1781 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1782 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1783 goto discard_it;
1784 }
1785
1786 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
63231bdd 1787 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1788 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1789 goto discard_it;
1790 }
1791
9469c7b4 1792 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1793 case TCP_TW_SYN:
1794 {
1795 struct sock *sk2;
1796
c346dca1 1797 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
0660e03f 1798 &ipv6_hdr(skb)->daddr,
505cbfc5 1799 ntohs(th->dest), inet6_iif(skb));
1da177e4 1800 if (sk2 != NULL) {
295ff7ed
ACM
1801 struct inet_timewait_sock *tw = inet_twsk(sk);
1802 inet_twsk_deschedule(tw, &tcp_death_row);
1803 inet_twsk_put(tw);
1da177e4
LT
1804 sk = sk2;
1805 goto process;
1806 }
1807 /* Fall through to ACK */
1808 }
1809 case TCP_TW_ACK:
1810 tcp_v6_timewait_ack(sk, skb);
1811 break;
1812 case TCP_TW_RST:
1813 goto no_tcp_socket;
1814 case TCP_TW_SUCCESS:;
1815 }
1816 goto discard_it;
1817}
1818
1da177e4
LT
1819static int tcp_v6_remember_stamp(struct sock *sk)
1820{
1821 /* Alas, not yet... */
1822 return 0;
1823}
1824
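/*
 * Operations for an AF_INET6 TCP socket talking to a genuine IPv6 peer:
 * IPv6 transmit, checksum and header-rebuild helpers wired to the shared
 * TCP connection-request and accept machinery.
 */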
3b401a81 1825static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1826 .queue_xmit = inet6_csk_xmit,
1827 .send_check = tcp_v6_send_check,
1828 .rebuild_header = inet6_sk_rebuild_header,
1829 .conn_request = tcp_v6_conn_request,
1830 .syn_recv_sock = tcp_v6_syn_recv_sock,
1831 .remember_stamp = tcp_v6_remember_stamp,
1832 .net_header_len = sizeof(struct ipv6hdr),
1833 .setsockopt = ipv6_setsockopt,
1834 .getsockopt = ipv6_getsockopt,
1835 .addr2sockaddr = inet6_csk_addr2sockaddr,
1836 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1837 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1838#ifdef CONFIG_COMPAT
543d9cfe
ACM
1839 .compat_setsockopt = compat_ipv6_setsockopt,
1840 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1841#endif
1da177e4
LT
1842};
1843
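/*
 * TCP MD5 signature (RFC 2385) helpers for IPv6 sockets: key lookup,
 * hash calculation over the pseudo-header and segment, key addition and
 * setsockopt parsing.
 */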
cfb6eeb4 1844#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1845static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1846 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1847 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4
YH
1848 .md5_add = tcp_v6_md5_add_func,
1849 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1850};
a928630a 1851#endif
cfb6eeb4 1852
1da177e4
LT
1853/*
1854 * TCP over IPv4 via INET6 API
1855 */
1856
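/*
 * Operations used when an AF_INET6 socket actually carries IPv4 traffic,
 * i.e. the peer is an IPv4-mapped address such as ::ffff:192.0.2.1:
 * packets are built and checksummed by the IPv4 routines while socket
 * option handling stays with IPv6.  icsk_af_ops is switched to this
 * table by the connect and accept paths earlier in this file.
 */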
3b401a81 1857static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1858 .queue_xmit = ip_queue_xmit,
1859 .send_check = tcp_v4_send_check,
1860 .rebuild_header = inet_sk_rebuild_header,
1861 .conn_request = tcp_v6_conn_request,
1862 .syn_recv_sock = tcp_v6_syn_recv_sock,
1863 .remember_stamp = tcp_v4_remember_stamp,
1864 .net_header_len = sizeof(struct iphdr),
1865 .setsockopt = ipv6_setsockopt,
1866 .getsockopt = ipv6_getsockopt,
1867 .addr2sockaddr = inet6_csk_addr2sockaddr,
1868 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1869 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1870#ifdef CONFIG_COMPAT
543d9cfe
ACM
1871 .compat_setsockopt = compat_ipv6_setsockopt,
1872 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1873#endif
1da177e4
LT
1874};
1875
cfb6eeb4 1876#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1877static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1878 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1879 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1880 .md5_add = tcp_v6_md5_add_func,
1881 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1882};
a928630a 1883#endif
cfb6eeb4 1884
1da177e4
LT
 1885/* NOTE: A lot of things are set to zero explicitly by the call to
 1886 * sk_alloc(), so they need not be done here.
 1887 */
1888static int tcp_v6_init_sock(struct sock *sk)
1889{
6687e988 1890 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1891 struct tcp_sock *tp = tcp_sk(sk);
1892
1893 skb_queue_head_init(&tp->out_of_order_queue);
1894 tcp_init_xmit_timers(sk);
1895 tcp_prequeue_init(tp);
1896
6687e988 1897 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1898 tp->mdev = TCP_TIMEOUT_INIT;
1899
1900 /* So many TCP implementations out there (incorrectly) count the
1901 * initial SYN frame in their delayed-ACK and congestion control
1902 * algorithms that we must have the following bandaid to talk
1903 * efficiently to them. -DaveM
1904 */
1905 tp->snd_cwnd = 2;
1906
1907 /* See draft-stevens-tcpca-spec-01 for discussion of the
1908 * initialization of these values.
1909 */
0b6a05c1 1910 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1da177e4 1911 tp->snd_cwnd_clamp = ~0;
bee7ca9e 1912 tp->mss_cache = TCP_MSS_DEFAULT;
1da177e4
LT
1913
1914 tp->reordering = sysctl_tcp_reordering;
1915
1916 sk->sk_state = TCP_CLOSE;
1917
8292a17a 1918 icsk->icsk_af_ops = &ipv6_specific;
6687e988 1919 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
d83d8461 1920 icsk->icsk_sync_mss = tcp_sync_mss;
1da177e4
LT
1921 sk->sk_write_space = sk_stream_write_space;
1922 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1923
cfb6eeb4
YH
1924#ifdef CONFIG_TCP_MD5SIG
1925 tp->af_specific = &tcp_sock_ipv6_specific;
1926#endif
1927
435cf559
WAS
1928 /* TCP Cookie Transactions */
1929 if (sysctl_tcp_cookie_size > 0) {
1930 /* Default, cookies without s_data_payload. */
1931 tp->cookie_values =
1932 kzalloc(sizeof(*tp->cookie_values),
1933 sk->sk_allocation);
1934 if (tp->cookie_values != NULL)
1935 kref_init(&tp->cookie_values->kref);
1936 }
1937 /* Presumed zeroed, in order of appearance:
1938 * cookie_in_always, cookie_out_never,
1939 * s_data_constant, s_data_in, s_data_out
1940 */
1da177e4
LT
1941 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1942 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1943
eb4dea58 1944 local_bh_disable();
1748376b 1945 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1946 local_bh_enable();
1da177e4
LT
1947
1948 return 0;
1949}
1950
7d06b2e0 1951static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1952{
cfb6eeb4
YH
1953#ifdef CONFIG_TCP_MD5SIG
1954 /* Clean up the MD5 key list */
1955 if (tcp_sk(sk)->md5sig_info)
1956 tcp_v6_clear_md5_list(sk);
1957#endif
1da177e4 1958 tcp_v4_destroy_sock(sk);
7d06b2e0 1959 inet6_destroy_sock(sk);
1da177e4
LT
1960}
1961
952a10be 1962#ifdef CONFIG_PROC_FS
1da177e4 1963/* Proc filesystem TCPv6 sock list dumping. */
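/*
 * /proc/net/tcp6 uses the same layout as /proc/net/tcp: one line per
 * entry with hex-encoded local and remote address:port pairs, socket
 * state, queue sizes, timer information, uid and inode, printed under
 * the header emitted by tcp6_seq_show().  get_openreq6(),
 * get_tcp6_sock() and get_timewait6_sock() format the three kinds of
 * entries: open requests, full sockets and TIME_WAIT sockets.
 */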
1ab1457c 1964static void get_openreq6(struct seq_file *seq,
60236fdd 1965 struct sock *sk, struct request_sock *req, int i, int uid)
1da177e4 1966{
1da177e4 1967 int ttd = req->expires - jiffies;
ca304b61
ACM
1968 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1969 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1da177e4
LT
1970
1971 if (ttd < 0)
1972 ttd = 0;
1973
1da177e4
LT
1974 seq_printf(seq,
1975 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1976 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1977 i,
1978 src->s6_addr32[0], src->s6_addr32[1],
1979 src->s6_addr32[2], src->s6_addr32[3],
fd507037 1980 ntohs(inet_rsk(req)->loc_port),
1da177e4
LT
1981 dest->s6_addr32[0], dest->s6_addr32[1],
1982 dest->s6_addr32[2], dest->s6_addr32[3],
2e6599cb 1983 ntohs(inet_rsk(req)->rmt_port),
1da177e4
LT
1984 TCP_SYN_RECV,
1985 0,0, /* could print option size, but that is af dependent. */
1ab1457c
YH
1986 1, /* timers active (only the expire timer) */
1987 jiffies_to_clock_t(ttd),
1da177e4
LT
1988 req->retrans,
1989 uid,
1ab1457c 1990 0, /* non standard timer */
1da177e4
LT
1991 0, /* open_requests have no inode */
1992 0, req);
1993}
1994
1995static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1996{
1997 struct in6_addr *dest, *src;
1998 __u16 destp, srcp;
1999 int timer_active;
2000 unsigned long timer_expires;
2001 struct inet_sock *inet = inet_sk(sp);
2002 struct tcp_sock *tp = tcp_sk(sp);
463c84b9 2003 const struct inet_connection_sock *icsk = inet_csk(sp);
1da177e4
LT
2004 struct ipv6_pinfo *np = inet6_sk(sp);
2005
2006 dest = &np->daddr;
2007 src = &np->rcv_saddr;
c720c7e8
ED
2008 destp = ntohs(inet->inet_dport);
2009 srcp = ntohs(inet->inet_sport);
463c84b9
ACM
2010
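	/*
	 * timer_active encodes which timer is currently pending:
	 * 1 = retransmit, 4 = zero-window probe, 2 = the socket's sk_timer
	 * (typically keepalive), 0 = none.
	 */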
2011 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2012 timer_active = 1;
463c84b9
ACM
2013 timer_expires = icsk->icsk_timeout;
2014 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2015 timer_active = 4;
463c84b9 2016 timer_expires = icsk->icsk_timeout;
1da177e4
LT
2017 } else if (timer_pending(&sp->sk_timer)) {
2018 timer_active = 2;
2019 timer_expires = sp->sk_timer.expires;
2020 } else {
2021 timer_active = 0;
2022 timer_expires = jiffies;
2023 }
2024
2025 seq_printf(seq,
2026 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
7be87351 2027 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
1da177e4
LT
2028 i,
2029 src->s6_addr32[0], src->s6_addr32[1],
2030 src->s6_addr32[2], src->s6_addr32[3], srcp,
2031 dest->s6_addr32[0], dest->s6_addr32[1],
2032 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1ab1457c 2033 sp->sk_state,
47da8ee6
SS
2034 tp->write_seq-tp->snd_una,
2035 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2036 timer_active,
2037 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2038 icsk->icsk_retransmits,
1da177e4 2039 sock_i_uid(sp),
6687e988 2040 icsk->icsk_probes_out,
1da177e4
LT
2041 sock_i_ino(sp),
2042 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
2043 jiffies_to_clock_t(icsk->icsk_rto),
2044 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2045 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
0b6a05c1
IJ
2046 tp->snd_cwnd,
2047 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1da177e4
LT
2048 );
2049}
2050
1ab1457c 2051static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 2052 struct inet_timewait_sock *tw, int i)
1da177e4
LT
2053{
2054 struct in6_addr *dest, *src;
2055 __u16 destp, srcp;
0fa1a53e 2056 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1da177e4
LT
2057 int ttd = tw->tw_ttd - jiffies;
2058
2059 if (ttd < 0)
2060 ttd = 0;
2061
0fa1a53e
ACM
2062 dest = &tw6->tw_v6_daddr;
2063 src = &tw6->tw_v6_rcv_saddr;
1da177e4
LT
2064 destp = ntohs(tw->tw_dport);
2065 srcp = ntohs(tw->tw_sport);
2066
2067 seq_printf(seq,
2068 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2069 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2070 i,
2071 src->s6_addr32[0], src->s6_addr32[1],
2072 src->s6_addr32[2], src->s6_addr32[3], srcp,
2073 dest->s6_addr32[0], dest->s6_addr32[1],
2074 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2075 tw->tw_substate, 0, 0,
2076 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2077 atomic_read(&tw->tw_refcnt), tw);
2078}
2079
1da177e4
LT
2080static int tcp6_seq_show(struct seq_file *seq, void *v)
2081{
2082 struct tcp_iter_state *st;
2083
2084 if (v == SEQ_START_TOKEN) {
2085 seq_puts(seq,
2086 " sl "
2087 "local_address "
2088 "remote_address "
2089 "st tx_queue rx_queue tr tm->when retrnsmt"
2090 " uid timeout inode\n");
2091 goto out;
2092 }
2093 st = seq->private;
2094
2095 switch (st->state) {
2096 case TCP_SEQ_STATE_LISTENING:
2097 case TCP_SEQ_STATE_ESTABLISHED:
2098 get_tcp6_sock(seq, v, st->num);
2099 break;
2100 case TCP_SEQ_STATE_OPENREQ:
2101 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2102 break;
2103 case TCP_SEQ_STATE_TIME_WAIT:
2104 get_timewait6_sock(seq, v, st->num);
2105 break;
2106 }
2107out:
2108 return 0;
2109}
2110
1da177e4 2111static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
2112 .name = "tcp6",
2113 .family = AF_INET6,
5f4472c5
DL
2114 .seq_fops = {
2115 .owner = THIS_MODULE,
2116 },
9427c4b3
DL
2117 .seq_ops = {
2118 .show = tcp6_seq_show,
2119 },
1da177e4
LT
2120};
2121
2c8c1e72 2122int __net_init tcp6_proc_init(struct net *net)
1da177e4 2123{
6f8b13bc 2124 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
2125}
2126
6f8b13bc 2127void tcp6_proc_exit(struct net *net)
1da177e4 2128{
6f8b13bc 2129 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
2130}
2131#endif
2132
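/*
 * The AF_INET6 SOCK_STREAM protocol definition.  Most operations are the
 * generic TCP ones shared with IPv4 (tcp_close, tcp_recvmsg, the global
 * tcp_hashinfo and the tcp_*mem sysctls); only connect, init, destroy,
 * the backlog receive path and hashing have IPv6-specific versions.
 */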
2133struct proto tcpv6_prot = {
2134 .name = "TCPv6",
2135 .owner = THIS_MODULE,
2136 .close = tcp_close,
2137 .connect = tcp_v6_connect,
2138 .disconnect = tcp_disconnect,
463c84b9 2139 .accept = inet_csk_accept,
1da177e4
LT
2140 .ioctl = tcp_ioctl,
2141 .init = tcp_v6_init_sock,
2142 .destroy = tcp_v6_destroy_sock,
2143 .shutdown = tcp_shutdown,
2144 .setsockopt = tcp_setsockopt,
2145 .getsockopt = tcp_getsockopt,
1da177e4
LT
2146 .recvmsg = tcp_recvmsg,
2147 .backlog_rcv = tcp_v6_do_rcv,
2148 .hash = tcp_v6_hash,
ab1e0a13
ACM
2149 .unhash = inet_unhash,
2150 .get_port = inet_csk_get_port,
1da177e4
LT
2151 .enter_memory_pressure = tcp_enter_memory_pressure,
2152 .sockets_allocated = &tcp_sockets_allocated,
2153 .memory_allocated = &tcp_memory_allocated,
2154 .memory_pressure = &tcp_memory_pressure,
0a5578cf 2155 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2156 .sysctl_mem = sysctl_tcp_mem,
2157 .sysctl_wmem = sysctl_tcp_wmem,
2158 .sysctl_rmem = sysctl_tcp_rmem,
2159 .max_header = MAX_TCP_HEADER,
2160 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 2161 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2162 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 2163 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 2164 .h.hashinfo = &tcp_hashinfo,
543d9cfe
ACM
2165#ifdef CONFIG_COMPAT
2166 .compat_setsockopt = compat_tcp_setsockopt,
2167 .compat_getsockopt = compat_tcp_getsockopt,
2168#endif
1da177e4
LT
2169};
2170
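/*
 * Transport-layer registration with the IPv6 stack: tcp_v6_rcv() takes
 * segments demultiplexed on IPPROTO_TCP, tcp_v6_err() handles ICMPv6
 * errors, and the gso/gro callbacks provide segmentation offload and
 * receive aggregation for TCP over IPv6.
 */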
41135cc8 2171static const struct inet6_protocol tcpv6_protocol = {
1da177e4
LT
2172 .handler = tcp_v6_rcv,
2173 .err_handler = tcp_v6_err,
a430a43d 2174 .gso_send_check = tcp_v6_gso_send_check,
adcfc7d0 2175 .gso_segment = tcp_tso_segment,
684f2176
HX
2176 .gro_receive = tcp6_gro_receive,
2177 .gro_complete = tcp6_gro_complete,
1da177e4
LT
2178 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2179};
2180
1da177e4
LT
2181static struct inet_protosw tcpv6_protosw = {
2182 .type = SOCK_STREAM,
2183 .protocol = IPPROTO_TCP,
2184 .prot = &tcpv6_prot,
2185 .ops = &inet6_stream_ops,
1da177e4 2186 .no_check = 0,
d83d8461
ACM
2187 .flags = INET_PROTOSW_PERMANENT |
2188 INET_PROTOSW_ICSK,
1da177e4
LT
2189};
2190
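/*
 * Per-network-namespace setup: each namespace gets its own kernel
 * control socket (used when transmitting resets and timewait ACKs), and
 * namespace teardown purges any remaining IPv6 TIME_WAIT sockets in one
 * batch.
 */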
2c8c1e72 2191static int __net_init tcpv6_net_init(struct net *net)
93ec926b 2192{
5677242f
DL
2193 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2194 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
2195}
2196
2c8c1e72 2197static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 2198{
5677242f 2199 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
2200}
2201
2c8c1e72 2202static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26
EB
2203{
2204 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
93ec926b
DL
2205}
2206
2207static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
2208 .init = tcpv6_net_init,
2209 .exit = tcpv6_net_exit,
2210 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
2211};
2212
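/*
 * Module init: register the IPPROTO_TCP handler with the IPv6 stack,
 * then the SOCK_STREAM protosw so sockets can be created, then the
 * per-namespace operations.  On failure, whatever was already registered
 * is unwound again before returning the error.
 */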
7f4e4868 2213int __init tcpv6_init(void)
1da177e4 2214{
7f4e4868
DL
2215 int ret;
2216
2217 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2218 if (ret)
2219 goto out;
2220
1da177e4 2221	/* register the TCPv6 protosw with the inet6 socket layer */
7f4e4868
DL
2222 ret = inet6_register_protosw(&tcpv6_protosw);
2223 if (ret)
2224 goto out_tcpv6_protocol;
2225
93ec926b 2226 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2227 if (ret)
2228 goto out_tcpv6_protosw;
2229out:
2230 return ret;
ae0f7d5f 2231
7f4e4868
DL
 2232out_tcpv6_protosw:
 2233	inet6_unregister_protosw(&tcpv6_protosw);
 2234out_tcpv6_protocol:
 2235	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
 2236	goto out;
2237}
2238
09f7709f 2239void tcpv6_exit(void)
7f4e4868 2240{
93ec926b 2241 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2242 inet6_unregister_protosw(&tcpv6_protosw);
2243 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2244}