/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				   const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

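/* Active open: resolve the flow label, fall back to tcp_v4_connect() for
 * v4-mapped destinations, route the flow, choose a source address, hash the
 * socket into the established table and send the initial SYN.
 */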
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr,
		       (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

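/* ICMPv6 error handler: look up the socket the offending segment belonged to,
 * resync the MSS on ICMPV6_PKT_TOOBIG, and report other errors to the socket
 * (or drop the matching request_sock on a listener).
 */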
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


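/* Build and transmit a SYN|ACK for a queued request_sock, routing it by the
 * addresses recorded in the request.
 */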
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

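/* TCP MD5 signature (RFC 2385) support: per-peer key management and
 * computation/verification of the MD5 option over the pseudo-header,
 * TCP header and payload.
 */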
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

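/* Send an unattached ACK or RST from the per-namespace control socket,
 * mirroring the addresses and ports of the received segment; timestamp and
 * MD5 options are appended when requested.
 */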
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

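/* Passive open: handle an incoming SYN on a listening socket, allocate and
 * queue a request_sock, and answer with a SYN|ACK (or fall back to SYN
 * cookies when the request queue overflows).
 */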
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	int want_cookie = 0;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

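/* Create the child socket once the 3-way handshake completes; the v4-mapped
 * case is delegated to tcp_v4_syn_recv_sock() and the new socket is switched
 * to the ipv6_mapped operations.
 */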
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if(nsk != sk) {
			sock_rps_save_rxhash(nsk, skb->rxhash);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

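/* Main receive entry point registered for TCP over IPv6: validate the header
 * and checksum, look up the owning socket and either process the segment
 * directly, prequeue it, or push it onto the socket backlog.
 */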
e5bbef20 1687static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1688{
1ab1457c 1689 struct tcphdr *th;
b71d1d42 1690 const struct ipv6hdr *hdr;
1da177e4
LT
1691 struct sock *sk;
1692 int ret;
a86b1e30 1693 struct net *net = dev_net(skb->dev);
1da177e4
LT
1694
1695 if (skb->pkt_type != PACKET_HOST)
1696 goto discard_it;
1697
1698 /*
1699 * Count it even if it's bad.
1700 */
63231bdd 1701 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1702
1703 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1704 goto discard_it;
1705
aa8223c7 1706 th = tcp_hdr(skb);
1da177e4
LT
1707
1708 if (th->doff < sizeof(struct tcphdr)/4)
1709 goto bad_packet;
1710 if (!pskb_may_pull(skb, th->doff*4))
1711 goto discard_it;
1712
60476372 1713 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1714 goto bad_packet;
1715
aa8223c7 1716 th = tcp_hdr(skb);
e802af9c 1717 hdr = ipv6_hdr(skb);
1da177e4
LT
1718 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1719 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1720 skb->len - th->doff*4);
1721 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1722 TCP_SKB_CB(skb)->when = 0;
e802af9c 1723 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1da177e4
LT
1724 TCP_SKB_CB(skb)->sacked = 0;
1725
9a1f27c4 1726 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1727 if (!sk)
1728 goto no_tcp_socket;
1729
1730process:
1731 if (sk->sk_state == TCP_TIME_WAIT)
1732 goto do_time_wait;
1733
e802af9c
SH
1734 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1735 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1736 goto discard_and_relse;
1737 }
1738
1da177e4
LT
1739 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1740 goto discard_and_relse;
1741
fda9ef5d 1742 if (sk_filter(sk, skb))
1da177e4
LT
1743 goto discard_and_relse;
1744
1745 skb->dev = NULL;
1746
293b9c42 1747 bh_lock_sock_nested(sk);
1da177e4
LT
1748 ret = 0;
1749 if (!sock_owned_by_user(sk)) {
1a2449a8 1750#ifdef CONFIG_NET_DMA
1ab1457c 1751 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a 1752 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1753 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1ab1457c
YH
1754 if (tp->ucopy.dma_chan)
1755 ret = tcp_v6_do_rcv(sk, skb);
1756 else
1a2449a8
CL
1757#endif
1758 {
1759 if (!tcp_prequeue(sk, skb))
1760 ret = tcp_v6_do_rcv(sk, skb);
1761 }
6cce09f8 1762 } else if (unlikely(sk_add_backlog(sk, skb))) {
6b03a53a 1763 bh_unlock_sock(sk);
6cce09f8 1764 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1765 goto discard_and_relse;
1766 }
1da177e4
LT
1767 bh_unlock_sock(sk);
1768
1769 sock_put(sk);
1770 return ret ? -1 : 0;
1771
1772no_tcp_socket:
1773 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1774 goto discard_it;
1775
1776 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1777bad_packet:
63231bdd 1778 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1779 } else {
cfb6eeb4 1780 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1781 }
1782
1783discard_it:
1784
1785 /*
1786 * Discard frame
1787 */
1788
1789 kfree_skb(skb);
1790 return 0;
1791
1792discard_and_relse:
1793 sock_put(sk);
1794 goto discard_it;
1795
1796do_time_wait:
1797 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1798 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1799 goto discard_it;
1800 }
1801
1802 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
63231bdd 1803 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1804 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1805 goto discard_it;
1806 }
1807
9469c7b4 1808 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1809 case TCP_TW_SYN:
1810 {
1811 struct sock *sk2;
1812
c346dca1 1813 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
0660e03f 1814 &ipv6_hdr(skb)->daddr,
505cbfc5 1815 ntohs(th->dest), inet6_iif(skb));
1da177e4 1816 if (sk2 != NULL) {
295ff7ed
ACM
1817 struct inet_timewait_sock *tw = inet_twsk(sk);
1818 inet_twsk_deschedule(tw, &tcp_death_row);
1819 inet_twsk_put(tw);
1da177e4
LT
1820 sk = sk2;
1821 goto process;
1822 }
1823 /* Fall through to ACK */
1824 }
1825 case TCP_TW_ACK:
1826 tcp_v6_timewait_ack(sk, skb);
1827 break;
1828 case TCP_TW_RST:
1829 goto no_tcp_socket;
1830 case TCP_TW_SUCCESS:;
1831 }
1832 goto discard_it;
1833}
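
For reference, the seq/end_seq bookkeeping that tcp_v6_rcv() fills into TCP_SKB_CB above counts SYN and FIN as one sequence number each, plus the payload length (skb->len - th->doff*4). A tiny standalone sketch of that arithmetic, using a hypothetical helper rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* end_seq = seq + syn + fin + payload length, with natural
 * 32-bit wraparound, mirroring the computation above. */
static uint32_t tcp_end_seq(uint32_t seq, int syn, int fin, uint32_t payload)
{
	return seq + syn + fin + payload;
}

int main(void)
{
	printf("%u\n", tcp_end_seq(1000, 1, 0, 0));    /* SYN, no data -> 1001 */
	printf("%u\n", tcp_end_seq(5000, 0, 0, 1460)); /* full segment -> 6460 */
	printf("%u\n", tcp_end_seq(9000, 0, 1, 100));  /* FIN + 100 B  -> 9101 */
	return 0;
}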
1834
ccb7c410
DM
1835static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1836{
db3949c4
DM
1837 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1838 struct ipv6_pinfo *np = inet6_sk(sk);
1839 struct inet_peer *peer;
1840
1841 if (!rt ||
1842 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1843 peer = inet_getpeer_v6(&np->daddr, 1);
1844 *release_it = true;
1845 } else {
1846 if (!rt->rt6i_peer)
1847 rt6_bind_peer(rt, 1);
1848 peer = rt->rt6i_peer;
457de438 1849 *release_it = false;
db3949c4
DM
1850 }
1851
1852 return peer;
ccb7c410
DM
1853}
1854
1855static void *tcp_v6_tw_get_peer(struct sock *sk)
1da177e4 1856{
db3949c4 1857 struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
ccb7c410
DM
1858 struct inet_timewait_sock *tw = inet_twsk(sk);
1859
1860 if (tw->tw_family == AF_INET)
1861 return tcp_v4_tw_get_peer(sk);
1862
db3949c4 1863 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1da177e4
LT
1864}
1865
ccb7c410
DM
1866static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1867 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1868 .twsk_unique = tcp_twsk_unique,
1869 .twsk_destructor= tcp_twsk_destructor,
1870 .twsk_getpeer = tcp_v6_tw_get_peer,
1871};
1872
3b401a81 1873static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1874 .queue_xmit = inet6_csk_xmit,
1875 .send_check = tcp_v6_send_check,
1876 .rebuild_header = inet6_sk_rebuild_header,
1877 .conn_request = tcp_v6_conn_request,
1878 .syn_recv_sock = tcp_v6_syn_recv_sock,
3f419d2d 1879 .get_peer = tcp_v6_get_peer,
543d9cfe
ACM
1880 .net_header_len = sizeof(struct ipv6hdr),
1881 .setsockopt = ipv6_setsockopt,
1882 .getsockopt = ipv6_getsockopt,
1883 .addr2sockaddr = inet6_csk_addr2sockaddr,
1884 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1885 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1886#ifdef CONFIG_COMPAT
543d9cfe
ACM
1887 .compat_setsockopt = compat_ipv6_setsockopt,
1888 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1889#endif
1da177e4
LT
1890};
1891
cfb6eeb4 1892#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1893static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1894 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1895 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4
YH
1896 .md5_add = tcp_v6_md5_add_func,
1897 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1898};
a928630a 1899#endif
cfb6eeb4 1900
1da177e4
LT
1901/*
1902 * TCP over IPv4 via INET6 API
1903 */
1904
3b401a81 1905static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1906 .queue_xmit = ip_queue_xmit,
1907 .send_check = tcp_v4_send_check,
1908 .rebuild_header = inet_sk_rebuild_header,
1909 .conn_request = tcp_v6_conn_request,
1910 .syn_recv_sock = tcp_v6_syn_recv_sock,
3f419d2d 1911 .get_peer = tcp_v4_get_peer,
543d9cfe
ACM
1912 .net_header_len = sizeof(struct iphdr),
1913 .setsockopt = ipv6_setsockopt,
1914 .getsockopt = ipv6_getsockopt,
1915 .addr2sockaddr = inet6_csk_addr2sockaddr,
1916 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1917 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1918#ifdef CONFIG_COMPAT
543d9cfe
ACM
1919 .compat_setsockopt = compat_ipv6_setsockopt,
1920 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1921#endif
1da177e4
LT
1922};
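
The ipv6_mapped ops above take effect when an AF_INET6 TCP socket connects to an IPv4-mapped address, so the traffic on the wire is plain IPv4 even though the socket API is IPv6. A minimal userspace sketch (illustrative only; it assumes something is listening on 127.0.0.1:8080 and that the socket is not set IPV6_V6ONLY):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	struct sockaddr_in6 sa;

	memset(&sa, 0, sizeof(sa));
	sa.sin6_family = AF_INET6;
	sa.sin6_port = htons(8080);
	/* v4-mapped address: the connection really runs over IPv4. */
	inet_pton(AF_INET6, "::ffff:127.0.0.1", &sa.sin6_addr);

	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0)
		printf("connected via a v4-mapped IPv6 socket\n");

	close(fd);
	return 0;
}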
1923
cfb6eeb4 1924#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1925static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1926 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1927 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1928 .md5_add = tcp_v6_md5_add_func,
1929 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1930};
a928630a 1931#endif
cfb6eeb4 1932
1da177e4
LT
 1933/* NOTE: A lot of things are set to zero explicitly by the call to
 1934 * sk_alloc(), so they need not be done here.
1935 */
1936static int tcp_v6_init_sock(struct sock *sk)
1937{
6687e988 1938 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1939 struct tcp_sock *tp = tcp_sk(sk);
1940
1941 skb_queue_head_init(&tp->out_of_order_queue);
1942 tcp_init_xmit_timers(sk);
1943 tcp_prequeue_init(tp);
1944
6687e988 1945 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1946 tp->mdev = TCP_TIMEOUT_INIT;
1947
1948 /* So many TCP implementations out there (incorrectly) count the
1949 * initial SYN frame in their delayed-ACK and congestion control
1950 * algorithms that we must have the following bandaid to talk
1951 * efficiently to them. -DaveM
1952 */
1953 tp->snd_cwnd = 2;
1954
1955 /* See draft-stevens-tcpca-spec-01 for discussion of the
1956 * initialization of these values.
1957 */
0b6a05c1 1958 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1da177e4 1959 tp->snd_cwnd_clamp = ~0;
bee7ca9e 1960 tp->mss_cache = TCP_MSS_DEFAULT;
1da177e4
LT
1961
1962 tp->reordering = sysctl_tcp_reordering;
1963
1964 sk->sk_state = TCP_CLOSE;
1965
8292a17a 1966 icsk->icsk_af_ops = &ipv6_specific;
6687e988 1967 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
d83d8461 1968 icsk->icsk_sync_mss = tcp_sync_mss;
1da177e4
LT
1969 sk->sk_write_space = sk_stream_write_space;
1970 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1971
cfb6eeb4
YH
1972#ifdef CONFIG_TCP_MD5SIG
1973 tp->af_specific = &tcp_sock_ipv6_specific;
1974#endif
1975
435cf559
WAS
1976 /* TCP Cookie Transactions */
1977 if (sysctl_tcp_cookie_size > 0) {
1978 /* Default, cookies without s_data_payload. */
1979 tp->cookie_values =
1980 kzalloc(sizeof(*tp->cookie_values),
1981 sk->sk_allocation);
1982 if (tp->cookie_values != NULL)
1983 kref_init(&tp->cookie_values->kref);
1984 }
1985 /* Presumed zeroed, in order of appearance:
1986 * cookie_in_always, cookie_out_never,
1987 * s_data_constant, s_data_in, s_data_out
1988 */
1da177e4
LT
1989 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1990 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1991
eb4dea58 1992 local_bh_disable();
1748376b 1993 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1994 local_bh_enable();
1da177e4
LT
1995
1996 return 0;
1997}
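
Several of the defaults set in tcp_v6_init_sock() (initial cwnd, ssthresh, retransmission timeout) are visible from userspace through the TCP_INFO socket option. A small sketch that reads them back, using the standard struct tcp_info fields from <netinet/tcp.h> (illustrative; on a freshly created socket the values reflect the defaults above):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	struct tcp_info info;
	socklen_t len = sizeof(info);

	/* connect() elided; TCP_INFO also works on an unconnected socket */
	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("cwnd=%u ssthresh=%u rto=%u us\n",
		       info.tcpi_snd_cwnd, info.tcpi_snd_ssthresh,
		       info.tcpi_rto);

	close(fd);
	return 0;
}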
1998
7d06b2e0 1999static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 2000{
cfb6eeb4
YH
2001#ifdef CONFIG_TCP_MD5SIG
2002 /* Clean up the MD5 key list */
2003 if (tcp_sk(sk)->md5sig_info)
2004 tcp_v6_clear_md5_list(sk);
2005#endif
1da177e4 2006 tcp_v4_destroy_sock(sk);
7d06b2e0 2007 inet6_destroy_sock(sk);
1da177e4
LT
2008}
2009
952a10be 2010#ifdef CONFIG_PROC_FS
1da177e4 2011/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 2012static void get_openreq6(struct seq_file *seq,
60236fdd 2013 struct sock *sk, struct request_sock *req, int i, int uid)
1da177e4 2014{
1da177e4 2015 int ttd = req->expires - jiffies;
b71d1d42
ED
2016 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2017 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1da177e4
LT
2018
2019 if (ttd < 0)
2020 ttd = 0;
2021
1da177e4
LT
2022 seq_printf(seq,
2023 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 2024 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
2025 i,
2026 src->s6_addr32[0], src->s6_addr32[1],
2027 src->s6_addr32[2], src->s6_addr32[3],
fd507037 2028 ntohs(inet_rsk(req)->loc_port),
1da177e4
LT
2029 dest->s6_addr32[0], dest->s6_addr32[1],
2030 dest->s6_addr32[2], dest->s6_addr32[3],
2e6599cb 2031 ntohs(inet_rsk(req)->rmt_port),
1da177e4
LT
2032 TCP_SYN_RECV,
2033 0,0, /* could print option size, but that is af dependent. */
1ab1457c
YH
2034 1, /* timers active (only the expire timer) */
2035 jiffies_to_clock_t(ttd),
1da177e4
LT
2036 req->retrans,
2037 uid,
1ab1457c 2038 0, /* non standard timer */
1da177e4
LT
2039 0, /* open_requests have no inode */
2040 0, req);
2041}
2042
2043static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2044{
b71d1d42 2045 const struct in6_addr *dest, *src;
1da177e4
LT
2046 __u16 destp, srcp;
2047 int timer_active;
2048 unsigned long timer_expires;
2049 struct inet_sock *inet = inet_sk(sp);
2050 struct tcp_sock *tp = tcp_sk(sp);
463c84b9 2051 const struct inet_connection_sock *icsk = inet_csk(sp);
1da177e4
LT
2052 struct ipv6_pinfo *np = inet6_sk(sp);
2053
2054 dest = &np->daddr;
2055 src = &np->rcv_saddr;
c720c7e8
ED
2056 destp = ntohs(inet->inet_dport);
2057 srcp = ntohs(inet->inet_sport);
463c84b9
ACM
2058
2059 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2060 timer_active = 1;
463c84b9
ACM
2061 timer_expires = icsk->icsk_timeout;
2062 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2063 timer_active = 4;
463c84b9 2064 timer_expires = icsk->icsk_timeout;
1da177e4
LT
2065 } else if (timer_pending(&sp->sk_timer)) {
2066 timer_active = 2;
2067 timer_expires = sp->sk_timer.expires;
2068 } else {
2069 timer_active = 0;
2070 timer_expires = jiffies;
2071 }
2072
2073 seq_printf(seq,
2074 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 2075 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
2076 i,
2077 src->s6_addr32[0], src->s6_addr32[1],
2078 src->s6_addr32[2], src->s6_addr32[3], srcp,
2079 dest->s6_addr32[0], dest->s6_addr32[1],
2080 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1ab1457c 2081 sp->sk_state,
47da8ee6
SS
2082 tp->write_seq-tp->snd_una,
2083 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2084 timer_active,
2085 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2086 icsk->icsk_retransmits,
1da177e4 2087 sock_i_uid(sp),
6687e988 2088 icsk->icsk_probes_out,
1da177e4
LT
2089 sock_i_ino(sp),
2090 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
2091 jiffies_to_clock_t(icsk->icsk_rto),
2092 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2093 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
0b6a05c1
IJ
2094 tp->snd_cwnd,
2095 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1da177e4
LT
2096 );
2097}
2098
1ab1457c 2099static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 2100 struct inet_timewait_sock *tw, int i)
1da177e4 2101{
b71d1d42 2102 const struct in6_addr *dest, *src;
1da177e4 2103 __u16 destp, srcp;
0fa1a53e 2104 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1da177e4
LT
2105 int ttd = tw->tw_ttd - jiffies;
2106
2107 if (ttd < 0)
2108 ttd = 0;
2109
0fa1a53e
ACM
2110 dest = &tw6->tw_v6_daddr;
2111 src = &tw6->tw_v6_rcv_saddr;
1da177e4
LT
2112 destp = ntohs(tw->tw_dport);
2113 srcp = ntohs(tw->tw_sport);
2114
2115 seq_printf(seq,
2116 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 2117 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
2118 i,
2119 src->s6_addr32[0], src->s6_addr32[1],
2120 src->s6_addr32[2], src->s6_addr32[3], srcp,
2121 dest->s6_addr32[0], dest->s6_addr32[1],
2122 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2123 tw->tw_substate, 0, 0,
2124 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2125 atomic_read(&tw->tw_refcnt), tw);
2126}
2127
1da177e4
LT
2128static int tcp6_seq_show(struct seq_file *seq, void *v)
2129{
2130 struct tcp_iter_state *st;
2131
2132 if (v == SEQ_START_TOKEN) {
2133 seq_puts(seq,
2134 " sl "
2135 "local_address "
2136 "remote_address "
2137 "st tx_queue rx_queue tr tm->when retrnsmt"
2138 " uid timeout inode\n");
2139 goto out;
2140 }
2141 st = seq->private;
2142
2143 switch (st->state) {
2144 case TCP_SEQ_STATE_LISTENING:
2145 case TCP_SEQ_STATE_ESTABLISHED:
2146 get_tcp6_sock(seq, v, st->num);
2147 break;
2148 case TCP_SEQ_STATE_OPENREQ:
2149 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2150 break;
2151 case TCP_SEQ_STATE_TIME_WAIT:
2152 get_timewait6_sock(seq, v, st->num);
2153 break;
2154 }
2155out:
2156 return 0;
2157}
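
The seq_printf() formats used by get_openreq6(), get_tcp6_sock() and get_timewait6_sock() are what appear as one line per socket in /proc/net/tcp6, with addresses and ports in hexadecimal. A rough userspace parser for the leading fields (a sketch only; it ignores the remaining columns):

#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/tcp6", "r");
	char line[512];

	if (!fp)
		return 1;

	/* skip the header line emitted by tcp6_seq_show() */
	fgets(line, sizeof(line), fp);

	while (fgets(line, sizeof(line), fp)) {
		unsigned int slot, lport, rport, state;
		char laddr[33], raddr[33];

		if (sscanf(line, "%u: %32[0-9A-Fa-f]:%x %32[0-9A-Fa-f]:%x %x",
			   &slot, laddr, &lport, raddr, &rport, &state) == 6)
			printf("slot %u local port %u state %#x\n",
			       slot, lport, state);
	}
	fclose(fp);
	return 0;
}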
2158
1da177e4 2159static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
2160 .name = "tcp6",
2161 .family = AF_INET6,
5f4472c5
DL
2162 .seq_fops = {
2163 .owner = THIS_MODULE,
2164 },
9427c4b3
DL
2165 .seq_ops = {
2166 .show = tcp6_seq_show,
2167 },
1da177e4
LT
2168};
2169
2c8c1e72 2170int __net_init tcp6_proc_init(struct net *net)
1da177e4 2171{
6f8b13bc 2172 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
2173}
2174
6f8b13bc 2175void tcp6_proc_exit(struct net *net)
1da177e4 2176{
6f8b13bc 2177 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
2178}
2179#endif
2180
2181struct proto tcpv6_prot = {
2182 .name = "TCPv6",
2183 .owner = THIS_MODULE,
2184 .close = tcp_close,
2185 .connect = tcp_v6_connect,
2186 .disconnect = tcp_disconnect,
463c84b9 2187 .accept = inet_csk_accept,
1da177e4
LT
2188 .ioctl = tcp_ioctl,
2189 .init = tcp_v6_init_sock,
2190 .destroy = tcp_v6_destroy_sock,
2191 .shutdown = tcp_shutdown,
2192 .setsockopt = tcp_setsockopt,
2193 .getsockopt = tcp_getsockopt,
1da177e4 2194 .recvmsg = tcp_recvmsg,
7ba42910
CG
2195 .sendmsg = tcp_sendmsg,
2196 .sendpage = tcp_sendpage,
1da177e4
LT
2197 .backlog_rcv = tcp_v6_do_rcv,
2198 .hash = tcp_v6_hash,
ab1e0a13
ACM
2199 .unhash = inet_unhash,
2200 .get_port = inet_csk_get_port,
1da177e4
LT
2201 .enter_memory_pressure = tcp_enter_memory_pressure,
2202 .sockets_allocated = &tcp_sockets_allocated,
2203 .memory_allocated = &tcp_memory_allocated,
2204 .memory_pressure = &tcp_memory_pressure,
0a5578cf 2205 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2206 .sysctl_mem = sysctl_tcp_mem,
2207 .sysctl_wmem = sysctl_tcp_wmem,
2208 .sysctl_rmem = sysctl_tcp_rmem,
2209 .max_header = MAX_TCP_HEADER,
2210 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 2211 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2212 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 2213 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 2214 .h.hashinfo = &tcp_hashinfo,
7ba42910 2215 .no_autobind = true,
543d9cfe
ACM
2216#ifdef CONFIG_COMPAT
2217 .compat_setsockopt = compat_tcp_setsockopt,
2218 .compat_getsockopt = compat_tcp_getsockopt,
2219#endif
1da177e4
LT
2220};
2221
41135cc8 2222static const struct inet6_protocol tcpv6_protocol = {
1da177e4
LT
2223 .handler = tcp_v6_rcv,
2224 .err_handler = tcp_v6_err,
a430a43d 2225 .gso_send_check = tcp_v6_gso_send_check,
adcfc7d0 2226 .gso_segment = tcp_tso_segment,
684f2176
HX
2227 .gro_receive = tcp6_gro_receive,
2228 .gro_complete = tcp6_gro_complete,
1da177e4
LT
2229 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2230};
2231
1da177e4
LT
2232static struct inet_protosw tcpv6_protosw = {
2233 .type = SOCK_STREAM,
2234 .protocol = IPPROTO_TCP,
2235 .prot = &tcpv6_prot,
2236 .ops = &inet6_stream_ops,
1da177e4 2237 .no_check = 0,
d83d8461
ACM
2238 .flags = INET_PROTOSW_PERMANENT |
2239 INET_PROTOSW_ICSK,
1da177e4
LT
2240};
2241
2c8c1e72 2242static int __net_init tcpv6_net_init(struct net *net)
93ec926b 2243{
5677242f
DL
2244 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2245 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
2246}
2247
2c8c1e72 2248static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 2249{
5677242f 2250 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
2251}
2252
2c8c1e72 2253static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26
EB
2254{
2255 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
93ec926b
DL
2256}
2257
2258static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
2259 .init = tcpv6_net_init,
2260 .exit = tcpv6_net_exit,
2261 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
2262};
2263
7f4e4868 2264int __init tcpv6_init(void)
1da177e4 2265{
7f4e4868
DL
2266 int ret;
2267
2268 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2269 if (ret)
2270 goto out;
2271
1da177e4 2272 /* register inet6 protocol */
7f4e4868
DL
2273 ret = inet6_register_protosw(&tcpv6_protosw);
2274 if (ret)
2275 goto out_tcpv6_protocol;
2276
93ec926b 2277 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2278 if (ret)
2279 goto out_tcpv6_protosw;
2280out:
2281 return ret;
ae0f7d5f 2282
7f4e4868
DL
 2283out_tcpv6_protosw:
 2284	inet6_unregister_protosw(&tcpv6_protosw);
 2285out_tcpv6_protocol:
 2286	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2287 goto out;
2288}
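
tcpv6_init() follows the usual kernel pattern: register resources in order, and on failure unwind only what has already been registered, in reverse (LIFO) order, which is why the error label for a later failure falls through into the teardown for the earlier registrations. A self-contained sketch of the same goto-unwind pattern, with hypothetical register_a/b/c helpers standing in for the protocol, protosw and pernet registrations:

#include <stdio.h>

static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return 0; }
static void unregister_a(void) { }
static void unregister_b(void) { }

static int init_all(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto out;

	ret = register_b();
	if (ret)
		goto out_a;	/* only A was registered */

	ret = register_c();
	if (ret)
		goto out_b;	/* A and B were registered */

	return 0;

out_b:
	unregister_b();
out_a:
	unregister_a();
out:
	return ret;
}

int main(void)
{
	printf("init_all() -> %d\n", init_all());
	return 0;
}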
2289
09f7709f 2290void tcpv6_exit(void)
7f4e4868 2291{
93ec926b 2292 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2293 inet6_unregister_protosw(&tcpv6_protosw);
2294 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2295}