rps: Some minor cleanup in get_rps_cpus
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv6 / tcp_ipv6.c
CommitLineData
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
1da177e4
LT
42
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
18134bed 62#include <net/netdma.h>
3d58b5fa 63#include <net/inet_common.h>
6e5714ea 64#include <net/secure_seq.h>
1da177e4
LT
65
66#include <asm/uaccess.h>
67
68#include <linux/proc_fs.h>
69#include <linux/seq_file.h>
70
cfb6eeb4
YH
71#include <linux/crypto.h>
72#include <linux/scatterlist.h>
73
cfb6eeb4 74static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
6edafaaf
GJ
75static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
1da177e4
LT
77
78static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
8ad50d96 79static void __tcp_v6_send_check(struct sk_buff *skb,
b71d1d42
ED
80 const struct in6_addr *saddr,
81 const struct in6_addr *daddr);
1da177e4 82
3b401a81
SH
83static const struct inet_connection_sock_af_ops ipv6_mapped;
84static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 85#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
86static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
87static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972
YH
88#else
89static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
b71d1d42 90 const struct in6_addr *addr)
9501f972
YH
91{
92 return NULL;
93}
a928630a 94#endif
1da177e4 95
1da177e4
LT
96static void tcp_v6_hash(struct sock *sk)
97{
98 if (sk->sk_state != TCP_CLOSE) {
8292a17a 99 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
1da177e4
LT
100 tcp_prot.hash(sk);
101 return;
102 }
103 local_bh_disable();
9327f705 104 __inet6_hash(sk, NULL);
1da177e4
LT
105 local_bh_enable();
106 }
107}
108
684f2176 109static __inline__ __sum16 tcp_v6_check(int len,
b71d1d42
ED
110 const struct in6_addr *saddr,
111 const struct in6_addr *daddr,
868c86bc 112 __wsum base)
1da177e4
LT
113{
114 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
115}
116
a94f723d 117static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
1da177e4 118{
0660e03f
ACM
119 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
120 ipv6_hdr(skb)->saddr.s6_addr32,
aa8223c7
ACM
121 tcp_hdr(skb)->dest,
122 tcp_hdr(skb)->source);
1da177e4
LT
123}
124
1ab1457c 125static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
126 int addr_len)
127{
128 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 129 struct inet_sock *inet = inet_sk(sk);
d83d8461 130 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
131 struct ipv6_pinfo *np = inet6_sk(sk);
132 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 133 struct in6_addr *saddr = NULL, *final_p, final;
493f377d 134 struct rt6_info *rt;
4c9483b2 135 struct flowi6 fl6;
1da177e4
LT
136 struct dst_entry *dst;
137 int addr_type;
138 int err;
139
1ab1457c 140 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
141 return -EINVAL;
142
1ab1457c 143 if (usin->sin6_family != AF_INET6)
a02cec21 144 return -EAFNOSUPPORT;
1da177e4 145
4c9483b2 146 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
147
148 if (np->sndflow) {
4c9483b2
DM
149 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
150 IP6_ECN_flow_init(fl6.flowlabel);
151 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 152 struct ip6_flowlabel *flowlabel;
4c9483b2 153 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1da177e4
LT
154 if (flowlabel == NULL)
155 return -EINVAL;
156 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
157 fl6_sock_release(flowlabel);
158 }
159 }
160
161 /*
1ab1457c
YH
162 * connect() to INADDR_ANY means loopback (BSD'ism).
163 */
164
165 if(ipv6_addr_any(&usin->sin6_addr))
166 usin->sin6_addr.s6_addr[15] = 0x1;
1da177e4
LT
167
168 addr_type = ipv6_addr_type(&usin->sin6_addr);
169
170 if(addr_type & IPV6_ADDR_MULTICAST)
171 return -ENETUNREACH;
172
173 if (addr_type&IPV6_ADDR_LINKLOCAL) {
174 if (addr_len >= sizeof(struct sockaddr_in6) &&
175 usin->sin6_scope_id) {
176 /* If interface is set while binding, indices
177 * must coincide.
178 */
179 if (sk->sk_bound_dev_if &&
180 sk->sk_bound_dev_if != usin->sin6_scope_id)
181 return -EINVAL;
182
183 sk->sk_bound_dev_if = usin->sin6_scope_id;
184 }
185
186 /* Connect to link-local address requires an interface */
187 if (!sk->sk_bound_dev_if)
188 return -EINVAL;
189 }
190
191 if (tp->rx_opt.ts_recent_stamp &&
192 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
193 tp->rx_opt.ts_recent = 0;
194 tp->rx_opt.ts_recent_stamp = 0;
195 tp->write_seq = 0;
196 }
197
198 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
4c9483b2 199 np->flow_label = fl6.flowlabel;
1da177e4
LT
200
201 /*
202 * TCP over IPv4
203 */
204
205 if (addr_type == IPV6_ADDR_MAPPED) {
d83d8461 206 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
207 struct sockaddr_in sin;
208
209 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
210
211 if (__ipv6_only_sock(sk))
212 return -ENETUNREACH;
213
214 sin.sin_family = AF_INET;
215 sin.sin_port = usin->sin6_port;
216 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
217
d83d8461 218 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 219 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
220#ifdef CONFIG_TCP_MD5SIG
221 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
222#endif
1da177e4
LT
223
224 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
225
226 if (err) {
d83d8461
ACM
227 icsk->icsk_ext_hdr_len = exthdrlen;
228 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 229 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
230#ifdef CONFIG_TCP_MD5SIG
231 tp->af_specific = &tcp_sock_ipv6_specific;
232#endif
1da177e4
LT
233 goto failure;
234 } else {
c720c7e8
ED
235 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
236 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
237 &np->rcv_saddr);
1da177e4
LT
238 }
239
240 return err;
241 }
242
243 if (!ipv6_addr_any(&np->rcv_saddr))
244 saddr = &np->rcv_saddr;
245
4c9483b2
DM
246 fl6.flowi6_proto = IPPROTO_TCP;
247 ipv6_addr_copy(&fl6.daddr, &np->daddr);
248 ipv6_addr_copy(&fl6.saddr,
1da177e4 249 (saddr ? saddr : &np->saddr));
4c9483b2
DM
250 fl6.flowi6_oif = sk->sk_bound_dev_if;
251 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
252 fl6.fl6_dport = usin->sin6_port;
253 fl6.fl6_sport = inet->inet_sport;
1da177e4 254
4c9483b2 255 final_p = fl6_update_dst(&fl6, np->opt, &final);
1da177e4 256
4c9483b2 257 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 258
4c9483b2 259 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
68d0c6d3
DM
260 if (IS_ERR(dst)) {
261 err = PTR_ERR(dst);
1da177e4 262 goto failure;
14e50e57 263 }
1da177e4
LT
264
265 if (saddr == NULL) {
4c9483b2 266 saddr = &fl6.saddr;
1da177e4
LT
267 ipv6_addr_copy(&np->rcv_saddr, saddr);
268 }
269
270 /* set the source address */
271 ipv6_addr_copy(&np->saddr, saddr);
c720c7e8 272 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 273
f83ef8c0 274 sk->sk_gso_type = SKB_GSO_TCPV6;
8e1ef0a9 275 __ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 276
493f377d
DM
277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 struct inet_peer *peer = rt6_get_peer(rt);
282 /*
283 * VJ's idea. We save last timestamp seen from
284 * the destination in peer table, when entering state
285 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
286 * when trying new connection.
287 */
288 if (peer) {
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
293 }
294 }
295 }
296
d83d8461 297 icsk->icsk_ext_hdr_len = 0;
1da177e4 298 if (np->opt)
d83d8461
ACM
299 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
300 np->opt->opt_nflen);
1da177e4
LT
301
302 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
303
c720c7e8 304 inet->inet_dport = usin->sin6_port;
1da177e4
LT
305
306 tcp_set_state(sk, TCP_SYN_SENT);
d8313f5c 307 err = inet6_hash_connect(&tcp_death_row, sk);
1da177e4
LT
308 if (err)
309 goto late_failure;
310
311 if (!tp->write_seq)
312 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
313 np->daddr.s6_addr32,
c720c7e8
ED
314 inet->inet_sport,
315 inet->inet_dport);
1da177e4
LT
316
317 err = tcp_connect(sk);
318 if (err)
319 goto late_failure;
320
321 return 0;
322
323late_failure:
324 tcp_set_state(sk, TCP_CLOSE);
325 __sk_dst_reset(sk);
326failure:
c720c7e8 327 inet->inet_dport = 0;
1da177e4
LT
328 sk->sk_route_caps = 0;
329 return err;
330}
331
332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 333 u8 type, u8 code, int offset, __be32 info)
1da177e4 334{
b71d1d42 335 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
505cbfc5 336 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
1da177e4
LT
337 struct ipv6_pinfo *np;
338 struct sock *sk;
339 int err;
1ab1457c 340 struct tcp_sock *tp;
1da177e4 341 __u32 seq;
ca12a1a4 342 struct net *net = dev_net(skb->dev);
1da177e4 343
ca12a1a4 344 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
d86e0dac 345 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
1da177e4
LT
346
347 if (sk == NULL) {
e41b5368
DL
348 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
349 ICMP6_MIB_INERRORS);
1da177e4
LT
350 return;
351 }
352
353 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 354 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
355 return;
356 }
357
358 bh_lock_sock(sk);
359 if (sock_owned_by_user(sk))
de0744af 360 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
361
362 if (sk->sk_state == TCP_CLOSE)
363 goto out;
364
e802af9c
SH
365 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
367 goto out;
368 }
369
1da177e4 370 tp = tcp_sk(sk);
1ab1457c 371 seq = ntohl(th->seq);
1da177e4
LT
372 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) {
de0744af 374 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
375 goto out;
376 }
377
378 np = inet6_sk(sk);
379
380 if (type == ICMPV6_PKT_TOOBIG) {
68d0c6d3 381 struct dst_entry *dst;
1da177e4
LT
382
383 if (sock_owned_by_user(sk))
384 goto out;
385 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
386 goto out;
387
388 /* icmp should have updated the destination cache entry */
389 dst = __sk_dst_check(sk, np->dst_cookie);
390
391 if (dst == NULL) {
392 struct inet_sock *inet = inet_sk(sk);
4c9483b2 393 struct flowi6 fl6;
1da177e4
LT
394
395 /* BUGGG_FUTURE: Again, it is not clear how
396 to handle rthdr case. Ignore this complexity
397 for now.
398 */
4c9483b2
DM
399 memset(&fl6, 0, sizeof(fl6));
400 fl6.flowi6_proto = IPPROTO_TCP;
401 ipv6_addr_copy(&fl6.daddr, &np->daddr);
402 ipv6_addr_copy(&fl6.saddr, &np->saddr);
403 fl6.flowi6_oif = sk->sk_bound_dev_if;
404 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
405 fl6.fl6_dport = inet->inet_dport;
406 fl6.fl6_sport = inet->inet_sport;
4c9483b2
DM
407 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
408
409 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
68d0c6d3
DM
410 if (IS_ERR(dst)) {
411 sk->sk_err_soft = -PTR_ERR(dst);
1da177e4
LT
412 goto out;
413 }
414
415 } else
416 dst_hold(dst);
417
d83d8461 418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
1da177e4
LT
419 tcp_sync_mss(sk, dst_mtu(dst));
420 tcp_simple_retransmit(sk);
421 } /* else let the usual retransmit timer handle it */
422 dst_release(dst);
423 goto out;
424 }
425
426 icmpv6_err_convert(type, code, &err);
427
60236fdd 428 /* Might be for an request_sock */
1da177e4 429 switch (sk->sk_state) {
60236fdd 430 struct request_sock *req, **prev;
1da177e4
LT
431 case TCP_LISTEN:
432 if (sock_owned_by_user(sk))
433 goto out;
434
8129765a
ACM
435 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
436 &hdr->saddr, inet6_iif(skb));
1da177e4
LT
437 if (!req)
438 goto out;
439
440 /* ICMPs are not backlogged, hence we cannot get
441 * an established socket here.
442 */
547b792c 443 WARN_ON(req->sk != NULL);
1da177e4 444
2e6599cb 445 if (seq != tcp_rsk(req)->snt_isn) {
de0744af 446 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
447 goto out;
448 }
449
463c84b9 450 inet_csk_reqsk_queue_drop(sk, req, prev);
1da177e4
LT
451 goto out;
452
453 case TCP_SYN_SENT:
454 case TCP_SYN_RECV: /* Cannot happen.
1ab1457c 455 It can, it SYNs are crossed. --ANK */
1da177e4 456 if (!sock_owned_by_user(sk)) {
1da177e4
LT
457 sk->sk_err = err;
458 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
459
460 tcp_done(sk);
461 } else
462 sk->sk_err_soft = err;
463 goto out;
464 }
465
466 if (!sock_owned_by_user(sk) && np->recverr) {
467 sk->sk_err = err;
468 sk->sk_error_report(sk);
469 } else
470 sk->sk_err_soft = err;
471
472out:
473 bh_unlock_sock(sk);
474 sock_put(sk);
475}
476
477
e6b4d113
WAS
478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp)
1da177e4 480{
ca304b61 481 struct inet6_request_sock *treq = inet6_rsk(req);
1da177e4
LT
482 struct ipv6_pinfo *np = inet6_sk(sk);
483 struct sk_buff * skb;
484 struct ipv6_txoptions *opt = NULL;
20c59de2 485 struct in6_addr * final_p, final;
4c9483b2 486 struct flowi6 fl6;
fd80eb94 487 struct dst_entry *dst;
68d0c6d3 488 int err;
1da177e4 489
4c9483b2
DM
490 memset(&fl6, 0, sizeof(fl6));
491 fl6.flowi6_proto = IPPROTO_TCP;
492 ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
493 ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
494 fl6.flowlabel = 0;
495 fl6.flowi6_oif = treq->iif;
496 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
497 fl6.fl6_dport = inet_rsk(req)->rmt_port;
498 fl6.fl6_sport = inet_rsk(req)->loc_port;
4c9483b2 499 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
1da177e4 500
fd80eb94 501 opt = np->opt;
4c9483b2 502 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 503
4c9483b2 504 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
68d0c6d3
DM
505 if (IS_ERR(dst)) {
506 err = PTR_ERR(dst);
738faca3 507 dst = NULL;
fd80eb94 508 goto done;
68d0c6d3 509 }
e6b4d113 510 skb = tcp_make_synack(sk, dst, req, rvp);
68d0c6d3 511 err = -ENOMEM;
1da177e4 512 if (skb) {
8ad50d96 513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
1da177e4 514
4c9483b2
DM
515 ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
516 err = ip6_xmit(sk, skb, &fl6, opt);
b9df3cb8 517 err = net_xmit_eval(err);
1da177e4
LT
518 }
519
520done:
1ab1457c 521 if (opt && opt != np->opt)
1da177e4 522 sock_kfree_s(sk, opt, opt->tot_len);
78b91042 523 dst_release(dst);
1da177e4
LT
524 return err;
525}
526
72659ecc
OP
527static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
528 struct request_values *rvp)
529{
530 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
531 return tcp_v6_send_synack(sk, req, rvp);
532}
533
c6aefafb
GG
534static inline void syn_flood_warning(struct sk_buff *skb)
535{
536#ifdef CONFIG_SYN_COOKIES
537 if (sysctl_tcp_syncookies)
538 printk(KERN_INFO
539 "TCPv6: Possible SYN flooding on port %d. "
540 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
541 else
542#endif
543 printk(KERN_INFO
544 "TCPv6: Possible SYN flooding on port %d. "
545 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
546}
547
60236fdd 548static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 549{
800d55f1 550 kfree_skb(inet6_rsk(req)->pktopts);
1da177e4
LT
551}
552
cfb6eeb4
YH
553#ifdef CONFIG_TCP_MD5SIG
554static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
b71d1d42 555 const struct in6_addr *addr)
cfb6eeb4
YH
556{
557 struct tcp_sock *tp = tcp_sk(sk);
558 int i;
559
560 BUG_ON(tp == NULL);
561
562 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
563 return NULL;
564
565 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 566 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
f8ab18d2 567 return &tp->md5sig_info->keys6[i].base;
cfb6eeb4
YH
568 }
569 return NULL;
570}
571
572static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
573 struct sock *addr_sk)
574{
575 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
576}
577
578static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
579 struct request_sock *req)
580{
581 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
582}
583
b71d1d42 584static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
cfb6eeb4
YH
585 char *newkey, u8 newkeylen)
586{
587 /* Add key to the list */
b0a713e9 588 struct tcp_md5sig_key *key;
cfb6eeb4
YH
589 struct tcp_sock *tp = tcp_sk(sk);
590 struct tcp6_md5sig_key *keys;
591
b0a713e9 592 key = tcp_v6_md5_do_lookup(sk, peer);
cfb6eeb4
YH
593 if (key) {
594 /* modify existing entry - just update that one */
b0a713e9
MD
595 kfree(key->key);
596 key->key = newkey;
597 key->keylen = newkeylen;
cfb6eeb4
YH
598 } else {
599 /* reallocate new list if current one is full. */
600 if (!tp->md5sig_info) {
601 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
602 if (!tp->md5sig_info) {
603 kfree(newkey);
604 return -ENOMEM;
605 }
a465419b 606 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
cfb6eeb4 607 }
aa133076 608 if (tcp_alloc_md5sig_pool(sk) == NULL) {
aacbe8c8
YH
609 kfree(newkey);
610 return -ENOMEM;
611 }
cfb6eeb4
YH
612 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
613 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
614 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
615
616 if (!keys) {
617 tcp_free_md5sig_pool();
618 kfree(newkey);
619 return -ENOMEM;
620 }
621
622 if (tp->md5sig_info->entries6)
623 memmove(keys, tp->md5sig_info->keys6,
624 (sizeof (tp->md5sig_info->keys6[0]) *
625 tp->md5sig_info->entries6));
626
627 kfree(tp->md5sig_info->keys6);
628 tp->md5sig_info->keys6 = keys;
629 tp->md5sig_info->alloced6++;
630 }
631
632 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
633 peer);
f8ab18d2
DM
634 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
635 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
cfb6eeb4
YH
636
637 tp->md5sig_info->entries6++;
638 }
639 return 0;
640}
641
642static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
643 u8 *newkey, __u8 newkeylen)
644{
645 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
646 newkey, newkeylen);
647}
648
b71d1d42 649static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
cfb6eeb4
YH
650{
651 struct tcp_sock *tp = tcp_sk(sk);
652 int i;
653
654 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 655 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
cfb6eeb4 656 /* Free the key */
f8ab18d2 657 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
658 tp->md5sig_info->entries6--;
659
660 if (tp->md5sig_info->entries6 == 0) {
661 kfree(tp->md5sig_info->keys6);
662 tp->md5sig_info->keys6 = NULL;
ca983cef 663 tp->md5sig_info->alloced6 = 0;
cfb6eeb4
YH
664 } else {
665 /* shrink the database */
666 if (tp->md5sig_info->entries6 != i)
667 memmove(&tp->md5sig_info->keys6[i],
668 &tp->md5sig_info->keys6[i+1],
669 (tp->md5sig_info->entries6 - i)
670 * sizeof (tp->md5sig_info->keys6[0]));
671 }
77adefdc
YH
672 tcp_free_md5sig_pool();
673 return 0;
cfb6eeb4
YH
674 }
675 }
676 return -ENOENT;
677}
678
679static void tcp_v6_clear_md5_list (struct sock *sk)
680{
681 struct tcp_sock *tp = tcp_sk(sk);
682 int i;
683
684 if (tp->md5sig_info->entries6) {
685 for (i = 0; i < tp->md5sig_info->entries6; i++)
f8ab18d2 686 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
687 tp->md5sig_info->entries6 = 0;
688 tcp_free_md5sig_pool();
689 }
690
691 kfree(tp->md5sig_info->keys6);
692 tp->md5sig_info->keys6 = NULL;
693 tp->md5sig_info->alloced6 = 0;
694
695 if (tp->md5sig_info->entries4) {
696 for (i = 0; i < tp->md5sig_info->entries4; i++)
f8ab18d2 697 kfree(tp->md5sig_info->keys4[i].base.key);
cfb6eeb4
YH
698 tp->md5sig_info->entries4 = 0;
699 tcp_free_md5sig_pool();
700 }
701
702 kfree(tp->md5sig_info->keys4);
703 tp->md5sig_info->keys4 = NULL;
704 tp->md5sig_info->alloced4 = 0;
705}
706
707static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
708 int optlen)
709{
710 struct tcp_md5sig cmd;
711 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
712 u8 *newkey;
713
714 if (optlen < sizeof(cmd))
715 return -EINVAL;
716
717 if (copy_from_user(&cmd, optval, sizeof(cmd)))
718 return -EFAULT;
719
720 if (sin6->sin6_family != AF_INET6)
721 return -EINVAL;
722
723 if (!cmd.tcpm_keylen) {
724 if (!tcp_sk(sk)->md5sig_info)
725 return -ENOENT;
e773e4fa 726 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
cfb6eeb4
YH
727 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
728 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
729 }
730
731 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
732 return -EINVAL;
733
734 if (!tcp_sk(sk)->md5sig_info) {
735 struct tcp_sock *tp = tcp_sk(sk);
736 struct tcp_md5sig_info *p;
737
738 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
739 if (!p)
740 return -ENOMEM;
741
742 tp->md5sig_info = p;
a465419b 743 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
cfb6eeb4
YH
744 }
745
af879cc7 746 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
747 if (!newkey)
748 return -ENOMEM;
e773e4fa 749 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
cfb6eeb4
YH
750 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
751 newkey, cmd.tcpm_keylen);
752 }
753 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
754}
755
49a72dfb 756static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
b71d1d42
ED
757 const struct in6_addr *daddr,
758 const struct in6_addr *saddr, int nbytes)
cfb6eeb4 759{
cfb6eeb4 760 struct tcp6_pseudohdr *bp;
49a72dfb 761 struct scatterlist sg;
8d26d76d 762
cfb6eeb4 763 bp = &hp->md5_blk.ip6;
cfb6eeb4
YH
764 /* 1. TCP pseudo-header (RFC2460) */
765 ipv6_addr_copy(&bp->saddr, saddr);
766 ipv6_addr_copy(&bp->daddr, daddr);
49a72dfb 767 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 768 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 769
49a72dfb
AL
770 sg_init_one(&sg, bp, sizeof(*bp));
771 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
772}
c7da57a1 773
49a72dfb 774static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
b71d1d42 775 const struct in6_addr *daddr, struct in6_addr *saddr,
49a72dfb
AL
776 struct tcphdr *th)
777{
778 struct tcp_md5sig_pool *hp;
779 struct hash_desc *desc;
780
781 hp = tcp_get_md5sig_pool();
782 if (!hp)
783 goto clear_hash_noput;
784 desc = &hp->md5_desc;
785
786 if (crypto_hash_init(desc))
787 goto clear_hash;
788 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
789 goto clear_hash;
790 if (tcp_md5_hash_header(hp, th))
791 goto clear_hash;
792 if (tcp_md5_hash_key(hp, key))
793 goto clear_hash;
794 if (crypto_hash_final(desc, md5_hash))
cfb6eeb4 795 goto clear_hash;
cfb6eeb4 796
cfb6eeb4 797 tcp_put_md5sig_pool();
cfb6eeb4 798 return 0;
49a72dfb 799
cfb6eeb4
YH
800clear_hash:
801 tcp_put_md5sig_pool();
802clear_hash_noput:
803 memset(md5_hash, 0, 16);
49a72dfb 804 return 1;
cfb6eeb4
YH
805}
806
49a72dfb
AL
807static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
808 struct sock *sk, struct request_sock *req,
809 struct sk_buff *skb)
cfb6eeb4 810{
b71d1d42 811 const struct in6_addr *saddr, *daddr;
49a72dfb
AL
812 struct tcp_md5sig_pool *hp;
813 struct hash_desc *desc;
814 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4
YH
815
816 if (sk) {
817 saddr = &inet6_sk(sk)->saddr;
818 daddr = &inet6_sk(sk)->daddr;
49a72dfb 819 } else if (req) {
cfb6eeb4
YH
820 saddr = &inet6_rsk(req)->loc_addr;
821 daddr = &inet6_rsk(req)->rmt_addr;
49a72dfb 822 } else {
b71d1d42 823 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
824 saddr = &ip6h->saddr;
825 daddr = &ip6h->daddr;
cfb6eeb4 826 }
49a72dfb
AL
827
828 hp = tcp_get_md5sig_pool();
829 if (!hp)
830 goto clear_hash_noput;
831 desc = &hp->md5_desc;
832
833 if (crypto_hash_init(desc))
834 goto clear_hash;
835
836 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
837 goto clear_hash;
838 if (tcp_md5_hash_header(hp, th))
839 goto clear_hash;
840 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
841 goto clear_hash;
842 if (tcp_md5_hash_key(hp, key))
843 goto clear_hash;
844 if (crypto_hash_final(desc, md5_hash))
845 goto clear_hash;
846
847 tcp_put_md5sig_pool();
848 return 0;
849
850clear_hash:
851 tcp_put_md5sig_pool();
852clear_hash_noput:
853 memset(md5_hash, 0, 16);
854 return 1;
cfb6eeb4
YH
855}
856
857static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
858{
859 __u8 *hash_location = NULL;
860 struct tcp_md5sig_key *hash_expected;
b71d1d42 861 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
aa8223c7 862 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 863 int genhash;
cfb6eeb4
YH
864 u8 newhash[16];
865
866 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 867 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 868
785957d3
DM
869 /* We've parsed the options - do we have a hash? */
870 if (!hash_expected && !hash_location)
871 return 0;
872
873 if (hash_expected && !hash_location) {
874 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
cfb6eeb4
YH
875 return 1;
876 }
877
785957d3
DM
878 if (!hash_expected && hash_location) {
879 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
cfb6eeb4
YH
880 return 1;
881 }
882
883 /* check the signature */
49a72dfb
AL
884 genhash = tcp_v6_md5_hash_skb(newhash,
885 hash_expected,
886 NULL, NULL, skb);
887
cfb6eeb4
YH
888 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
889 if (net_ratelimit()) {
5856b606 890 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
cfb6eeb4 891 genhash ? "failed" : "mismatch",
0c6ce78a
HH
892 &ip6h->saddr, ntohs(th->source),
893 &ip6h->daddr, ntohs(th->dest));
cfb6eeb4
YH
894 }
895 return 1;
896 }
897 return 0;
898}
899#endif
900
c6aefafb 901struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 902 .family = AF_INET6,
2e6599cb 903 .obj_size = sizeof(struct tcp6_request_sock),
72659ecc 904 .rtx_syn_ack = tcp_v6_rtx_synack,
60236fdd
ACM
905 .send_ack = tcp_v6_reqsk_send_ack,
906 .destructor = tcp_v6_reqsk_destructor,
72659ecc
OP
907 .send_reset = tcp_v6_send_reset,
908 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
909};
910
cfb6eeb4 911#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 912static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
cfb6eeb4 913 .md5_lookup = tcp_v6_reqsk_md5_lookup,
e3afe7b7 914 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 915};
b6332e6c 916#endif
cfb6eeb4 917
8ad50d96 918static void __tcp_v6_send_check(struct sk_buff *skb,
b71d1d42 919 const struct in6_addr *saddr, const struct in6_addr *daddr)
1da177e4 920{
aa8223c7 921 struct tcphdr *th = tcp_hdr(skb);
1da177e4 922
84fa7933 923 if (skb->ip_summed == CHECKSUM_PARTIAL) {
8ad50d96 924 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
663ead3b 925 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 926 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 927 } else {
8ad50d96
HX
928 th->check = tcp_v6_check(skb->len, saddr, daddr,
929 csum_partial(th, th->doff << 2,
930 skb->csum));
1da177e4
LT
931 }
932}
933
bb296246 934static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
8ad50d96
HX
935{
936 struct ipv6_pinfo *np = inet6_sk(sk);
937
938 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
939}
940
a430a43d
HX
941static int tcp_v6_gso_send_check(struct sk_buff *skb)
942{
b71d1d42 943 const struct ipv6hdr *ipv6h;
a430a43d
HX
944 struct tcphdr *th;
945
946 if (!pskb_may_pull(skb, sizeof(*th)))
947 return -EINVAL;
948
0660e03f 949 ipv6h = ipv6_hdr(skb);
aa8223c7 950 th = tcp_hdr(skb);
a430a43d
HX
951
952 th->check = 0;
84fa7933 953 skb->ip_summed = CHECKSUM_PARTIAL;
8ad50d96 954 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
a430a43d
HX
955 return 0;
956}
1da177e4 957
36990673
HX
958static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
959 struct sk_buff *skb)
684f2176 960{
b71d1d42 961 const struct ipv6hdr *iph = skb_gro_network_header(skb);
684f2176
HX
962
963 switch (skb->ip_summed) {
964 case CHECKSUM_COMPLETE:
86911732 965 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
684f2176
HX
966 skb->csum)) {
967 skb->ip_summed = CHECKSUM_UNNECESSARY;
968 break;
969 }
970
971 /* fall through */
972 case CHECKSUM_NONE:
973 NAPI_GRO_CB(skb)->flush = 1;
974 return NULL;
975 }
976
977 return tcp_gro_receive(head, skb);
978}
684f2176 979
36990673 980static int tcp6_gro_complete(struct sk_buff *skb)
684f2176 981{
b71d1d42 982 const struct ipv6hdr *iph = ipv6_hdr(skb);
684f2176
HX
983 struct tcphdr *th = tcp_hdr(skb);
984
985 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
986 &iph->saddr, &iph->daddr, 0);
987 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
988
989 return tcp_gro_complete(skb);
990}
684f2176 991
626e264d
IJ
/* Build and transmit a stateless TCP control packet (RST or bare ACK)
 * in reply to @skb, using the per-net IPv6 TCP control socket.
 *
 * @seq/@ack/@win: sequence/ack/window fields for the reply.
 * @ts:  peer timestamp to echo; non-zero adds a TCP timestamp option.
 * @key: optional TCP-MD5 key; non-NULL adds an MD5 signature option.
 * @rst: non-zero to send a RST, zero for an ACK.
 *
 * Source/destination addresses and ports are swapped relative to the
 * packet being answered. Allocation failures are silently dropped
 * (best-effort, GFP_ATOMIC context).
 */
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	/* Grow the header for any options we will emit. */
	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* A RST answering a non-ACK segment carries no ACK bit. */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign with addresses swapped, matching the reply direction. */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST.
	 * Underlying function will use this to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* Route lookup failed: drop the reply we built. */
	kfree_skb(buff);
}
1082
/* Send a RST in answer to @skb (e.g. segment for a non-existent
 * connection). @sk may be NULL when there is no matching socket; it is
 * only used to look up an MD5 key so the RST is accepted by a signing
 * peer. RFC rules: never RST a RST, and only for unicast destinations.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	/* If the offending segment carried an ACK, RST with its ack as our
	 * seq; otherwise ack everything it occupied (data + SYN/FIN flags).
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
1da177e4 1108
626e264d
IJ
/* Convenience wrapper: send a stateless ACK (rst = 0) via
 * tcp_v6_send_response(). Used for TIME-WAIT and request-socket ACKs.
 */
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}
1114
/* ACK a segment received for a TIME-WAIT socket, echoing the timewait
 * state's snd_nxt/rcv_nxt, scaled receive window, recent timestamp and
 * MD5 key. Drops the timewait reference taken by the caller.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}
1126
6edafaaf
GJ
/* ACK on behalf of a pending request socket (SYN_RECV): ack rcv_isn+1,
 * send snt_isn+1 as our sequence, with the request's window/timestamp
 * and the listener's MD5 key for the peer address (may be NULL).
 */
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
1133
1134
/* Demultiplex a segment arriving on listener @sk:
 *  - matching pending request socket  -> tcp_check_req()
 *  - matching established socket      -> return it, bh-locked
 *  - matching TIME-WAIT socket        -> drop ref, return NULL
 *  - otherwise (with syncookies) try to validate an ACK-borne cookie.
 * Returns the socket to process against, or NULL to discard.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			/* Caller unlocks after processing. */
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	/* Only non-SYN segments can carry a syncookie ACK. */
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1167
1da177e4
LT
1168/* FIXME: this is substantially similar to the ipv4 code.
1169 * Can some kind of merge be done? -- erics
1170 */
/* Handle an incoming SYN on a listening IPv6 socket: allocate and queue
 * a request socket, parse options (including TCP cookie transactions),
 * possibly fall back to syncookies under SYN flood, run the
 * tw_recycle/PAWS checks, pick an ISN and send the SYN-ACK.
 * Returns 0 in all cases (never sends a reset from here).
 *
 * v4-mapped traffic is diverted to tcp_v4_conn_request() up front.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;	/* non-zero: came via timewait */
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/* Accept queue of embryonic connections is full: either switch to
	 * syncookies (if enabled) or drop the SYN.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	/* TCP Cookie Transactions (experimental): mix peer/local addresses
	 * and the initiator cookie into the cookie bakery.
	 */
	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	/* Syncookies cannot encode options unless timestamps are present. */
	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		/* Keep the SYN around if the user asked for received
		 * IPv6 packet options on this socket.
		 */
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	/* With syncookies the request is intentionally not kept. */
	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
1364
/* Create the child (established) socket when the 3WHS completes.
 * Handles two cases: a v4-mapped connection (delegate to
 * tcp_v4_syn_recv_sock() and re-point the af_ops at the mapped
 * variants) and native IPv6 (clone the listener, copy addresses and
 * IPv6 options from the request, set up dst/MTU/MD5, and hash the
 * child). Returns the new socket or NULL on failure.
 */
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		/* Record the peer/local IPv4 addresses as ::ffff:a.b.c.d. */
		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	/* Seed the RTT estimator from the SYN-ACK round trip, if known. */
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
1555
/* Validate/prepare the TCP checksum on receive. Returns 0 when the
 * checksum is (or will later be verified as) OK, non-zero on a bad
 * small packet. For CHECKSUM_COMPLETE, verify against the hardware
 * sum; otherwise seed skb->csum with the pseudo-header and verify
 * short packets (<= 76 bytes) immediately, deferring the rest.
 */
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	/* Hardware sum mismatched or absent: precompute the folded
	 * pseudo-header checksum for later full verification.
	 */
	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	/* NOTE(review): 76 appears to be a copybreak-style threshold
	 * (small packets verified eagerly) mirroring the IPv4 path.
	 */
	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
1575
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if(nsk != sk) {
			sock_rps_save_rxhash(nsk, skb->rxhash);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	/* Free whichever skb was swapped out (kfree_skb(NULL) is a no-op). */
	kfree_skb(opt_skb);
	return 0;
}
1708
/* Main TCPv6 receive entry point (registered as the IPPROTO_TCP
 * handler). Validates header/checksum, fills in TCP_SKB_CB, looks up
 * the owning socket and either processes the segment directly, via
 * prequeue/DMA, or defers it to the socket backlog. Unmatched
 * segments get a RST; TIME-WAIT sockets are handled at do_time_wait.
 * Returns 0 on consume, -1 if the backlog-rcv path reported an error.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	/* Re-read headers: pskb_may_pull() may have reallocated. */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	/* IP_MINTTL-style defense (RFC 5082 GTSM analogue for hop limit). */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		/* Backlog full: drop and count. */
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* New SYN hit a TIME-WAIT bucket: if a listener exists,
		 * kill the timewait socket and restart processing on it.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
1856
ccb7c410
DM
/* Return the inet_peer entry for this connection's destination.
 * Prefer the peer cached on the cached route (no extra ref,
 * *release_it = false); otherwise do a fresh lookup that the caller
 * must release (*release_it = true).
 */
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}
1876
/* timewait_sock_ops->twsk_getpeer: look up the inet_peer for a
 * TIME-WAIT socket's destination, delegating v4-family timewait
 * sockets to the IPv4 helper.
 */
static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}
1887
ccb7c410
DM
/* TIME-WAIT socket operations for TCPv6. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};
1894
/* Connection-level AF operations for native IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1913
cfb6eeb4 1914#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature operations for native IPv6 TCP sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
a928630a 1921#endif
cfb6eeb4 1922
1da177e4
LT
1923/*
1924 * TCP over IPv4 via INET6 API
1925 */
1926
/* Connection-level AF operations for v4-mapped sockets: IPv4
 * transmit/checksum paths behind an AF_INET6 socket API.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1945
cfb6eeb4 1946#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature operations for v4-mapped sockets (IPv4 hashing). */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
a928630a 1953#endif
cfb6eeb4 1954
1da177e4
LT
1955/* NOTE: A lot of things set to zero explicitly by call to
1956 * sk_alloc() so need not be done here.
1957 */
/* Initialize a freshly allocated TCPv6 socket: timers, congestion
 * defaults, af_ops wiring, buffer sizes, and (optionally) TCP cookie
 * transaction state. Always returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
2020
/*
 * Destructor for TCPv6 sockets: release any MD5 signature keys, then
 * run the shared TCP teardown followed by the IPv6 socket teardown.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_sock *tp = tcp_sk(sk);

	/* Clean up the MD5 key list */
	if (tp->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
2031
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */

/*
 * Print one SYN_RECV open request as a /proc/net/tcp6 row.
 * @sk is the listening socket owning @req, @i the row index and
 * @uid the listener's owning uid.
 */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	/* Remaining request lifetime in jiffies, clamped at zero. */
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
2064
/*
 * Print one listening/established TCPv6 socket as a /proc/net/tcp6 row.
 * Mirrors get_tcp4_sock() in net/ipv4/tcp_ipv4.c.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest = &np->daddr;
	src = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	/* Encode which timer is pending:
	 * 1 = retransmit, 4 = zero-window probe, 2 = sk_timer
	 * (keepalive/SYN-ACK on listeners), 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   /* rx_queue: listeners report accept backlog instead */
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   /* -1 means still in initial slow start (infinite ssthresh) */
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
2120
/*
 * Print one TIME_WAIT TCPv6 socket as a /proc/net/tcp6 row.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	/* Remaining TIME_WAIT lifetime in jiffies, clamped at zero. */
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
2149
1da177e4
LT
2150static int tcp6_seq_show(struct seq_file *seq, void *v)
2151{
2152 struct tcp_iter_state *st;
2153
2154 if (v == SEQ_START_TOKEN) {
2155 seq_puts(seq,
2156 " sl "
2157 "local_address "
2158 "remote_address "
2159 "st tx_queue rx_queue tr tm->when retrnsmt"
2160 " uid timeout inode\n");
2161 goto out;
2162 }
2163 st = seq->private;
2164
2165 switch (st->state) {
2166 case TCP_SEQ_STATE_LISTENING:
2167 case TCP_SEQ_STATE_ESTABLISHED:
2168 get_tcp6_sock(seq, v, st->num);
2169 break;
2170 case TCP_SEQ_STATE_OPENREQ:
2171 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2172 break;
2173 case TCP_SEQ_STATE_TIME_WAIT:
2174 get_timewait6_sock(seq, v, st->num);
2175 break;
2176 }
2177out:
2178 return 0;
2179}
2180
/*
 * seq_file glue for /proc/net/tcp6.  Only ->show is filled in here;
 * the remaining fops/seq_ops fields are completed by
 * tcp_proc_register() (see tcp6_proc_init below).
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
2191
/* Register the per-namespace /proc/net/tcp6 entry. */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
2196
/* Remove the per-namespace /proc/net/tcp6 entry. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
2202
/*
 * struct proto for AF_INET6 SOCK_STREAM sockets.  Most operations are
 * shared with IPv4 TCP; only connect/init/destroy/hash/backlog_rcv
 * are IPv6-specific, and the hash table itself (tcp_hashinfo) is
 * shared with IPv4 so v4-mapped sockets are found in one place.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	/* Sockets are looked up under RCU; slab must defer page reuse. */
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
2243
/*
 * Transport-layer hooks for IPPROTO_TCP on IPv6: receive and ICMPv6
 * error handlers plus GSO/GRO offload callbacks.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
2253
1da177e4
LT
/* socket() switch entry binding SOCK_STREAM/IPPROTO_TCP to tcpv6_prot. */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
2263
/* Per-namespace init: create the kernel control socket used to send
 * resets/ACKs on behalf of this namespace.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
2269
/* Per-namespace exit: destroy the namespace's TCP control socket. */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
2274
/* Batched exit: purge IPv6 timewait sockets for all dying namespaces
 * in one hash-table walk instead of once per namespace.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
2279
/* Network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
2285
7f4e4868 2286int __init tcpv6_init(void)
1da177e4 2287{
7f4e4868
DL
2288 int ret;
2289
2290 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2291 if (ret)
2292 goto out;
2293
1da177e4 2294 /* register inet6 protocol */
7f4e4868
DL
2295 ret = inet6_register_protosw(&tcpv6_protosw);
2296 if (ret)
2297 goto out_tcpv6_protocol;
2298
93ec926b 2299 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2300 if (ret)
2301 goto out_tcpv6_protosw;
2302out:
2303 return ret;
ae0f7d5f 2304
7f4e4868
DL
2305out_tcpv6_protocol:
2306 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2307out_tcpv6_protosw:
2308 inet6_unregister_protosw(&tcpv6_protosw);
2309 goto out;
2310}
2311
/* Module exit: tear down in exact reverse order of tcpv6_init(). */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}